xref: /freebsd/sys/netinet/sctputil.c (revision 4310d6deb27da04d3fe079a0584edd557a764e21)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #endif
45 #include <netinet/sctp_header.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_uio.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_bsd_addr.h>
53 
54 
55 #ifndef KTR_SCTP
56 #define KTR_SCTP KTR_SUBSYS
57 #endif
58 
59 extern struct sctp_cc_functions sctp_cc_functions[];
60 extern struct sctp_ss_functions sctp_ss_functions[];
61 
62 void
63 sctp_sblog(struct sockbuf *sb,
64     struct sctp_tcb *stcb, int from, int incr)
65 {
66 	struct sctp_cwnd_log sctp_clog;
67 
68 	sctp_clog.x.sb.stcb = stcb;
69 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
70 	if (stcb)
71 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
72 	else
73 		sctp_clog.x.sb.stcb_sbcc = 0;
74 	sctp_clog.x.sb.incr = incr;
75 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
76 	    SCTP_LOG_EVENT_SB,
77 	    from,
78 	    sctp_clog.x.misc.log1,
79 	    sctp_clog.x.misc.log2,
80 	    sctp_clog.x.misc.log3,
81 	    sctp_clog.x.misc.log4);
82 }
83 
84 void
85 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
86 {
87 	struct sctp_cwnd_log sctp_clog;
88 
89 	sctp_clog.x.close.inp = (void *)inp;
90 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
91 	if (stcb) {
92 		sctp_clog.x.close.stcb = (void *)stcb;
93 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
94 	} else {
95 		sctp_clog.x.close.stcb = 0;
96 		sctp_clog.x.close.state = 0;
97 	}
98 	sctp_clog.x.close.loc = loc;
99 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
100 	    SCTP_LOG_EVENT_CLOSE,
101 	    0,
102 	    sctp_clog.x.misc.log1,
103 	    sctp_clog.x.misc.log2,
104 	    sctp_clog.x.misc.log3,
105 	    sctp_clog.x.misc.log4);
106 }
107 
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
204     int from)
205 {
206 	struct sctp_cwnd_log sctp_clog;
207 
208 	memset(&sctp_clog, 0, sizeof(sctp_clog));
209 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
210 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
211 	sctp_clog.x.fr.tsn = tsn;
212 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
213 	    SCTP_LOG_EVENT_FR,
214 	    from,
215 	    sctp_clog.x.misc.log1,
216 	    sctp_clog.x.misc.log2,
217 	    sctp_clog.x.misc.log3,
218 	    sctp_clog.x.misc.log4);
219 }
220 
221 void
222 sctp_log_mb(struct mbuf *m, int from)
223 {
224 	struct sctp_cwnd_log sctp_clog;
225 
226 	sctp_clog.x.mb.mp = m;
227 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230 	if (SCTP_BUF_IS_EXTENDED(m)) {
231 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233 	} else {
234 		sctp_clog.x.mb.ext = 0;
235 		sctp_clog.x.mb.refcnt = 0;
236 	}
237 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238 	    SCTP_LOG_EVENT_MBUF,
239 	    from,
240 	    sctp_clog.x.misc.log1,
241 	    sctp_clog.x.misc.log2,
242 	    sctp_clog.x.misc.log3,
243 	    sctp_clog.x.misc.log4);
244 }
245 
246 void
247 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
248     int from)
249 {
250 	struct sctp_cwnd_log sctp_clog;
251 
252 	if (control == NULL) {
253 		SCTP_PRINTF("Gak log of NULL?\n");
254 		return;
255 	}
256 	sctp_clog.x.strlog.stcb = control->stcb;
257 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
258 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
259 	sctp_clog.x.strlog.strm = control->sinfo_stream;
260 	if (poschk != NULL) {
261 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
262 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
263 	} else {
264 		sctp_clog.x.strlog.e_tsn = 0;
265 		sctp_clog.x.strlog.e_sseq = 0;
266 	}
267 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
268 	    SCTP_LOG_EVENT_STRM,
269 	    from,
270 	    sctp_clog.x.misc.log1,
271 	    sctp_clog.x.misc.log2,
272 	    sctp_clog.x.misc.log3,
273 	    sctp_clog.x.misc.log4);
274 }
275 
276 void
277 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
278 {
279 	struct sctp_cwnd_log sctp_clog;
280 
281 	sctp_clog.x.cwnd.net = net;
282 	if (stcb->asoc.send_queue_cnt > 255)
283 		sctp_clog.x.cwnd.cnt_in_send = 255;
284 	else
285 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
286 	if (stcb->asoc.stream_queue_cnt > 255)
287 		sctp_clog.x.cwnd.cnt_in_str = 255;
288 	else
289 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
290 
291 	if (net) {
292 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
293 		sctp_clog.x.cwnd.inflight = net->flight_size;
294 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
295 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
296 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
297 	}
298 	if (SCTP_CWNDLOG_PRESEND == from) {
299 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
300 	}
301 	sctp_clog.x.cwnd.cwnd_augment = augment;
302 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
303 	    SCTP_LOG_EVENT_CWND,
304 	    from,
305 	    sctp_clog.x.misc.log1,
306 	    sctp_clog.x.misc.log2,
307 	    sctp_clog.x.misc.log3,
308 	    sctp_clog.x.misc.log4);
309 }
310 
/*
 * Log the ownership state of every lock relevant to this inp/stcb pair.
 * Both inp and stcb may be NULL; unknown lock states are recorded as
 * SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* global endpoint table lock (write-owned check) */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx — sock_lock looks like it was meant to use
		 * the socket's own mutex; confirm before relying on it.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
354 
/*
 * Log a max-burst limit event, reusing the cwnd log record layout
 * (error -> cwnd_new_value, burst -> cwnd_augment).
 * NOTE(review): net is dereferenced unconditionally for flight_size,
 * unlike sctp_log_cwnd() which NULL-checks it — callers must pass a
 * valid net; confirm at call sites.
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	/* queue counters clamped to the 8-bit log fields */
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
381 
382 void
383 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
384 {
385 	struct sctp_cwnd_log sctp_clog;
386 
387 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
388 	sctp_clog.x.rwnd.send_size = snd_size;
389 	sctp_clog.x.rwnd.overhead = overhead;
390 	sctp_clog.x.rwnd.new_rwnd = 0;
391 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
392 	    SCTP_LOG_EVENT_RWND,
393 	    from,
394 	    sctp_clog.x.misc.log1,
395 	    sctp_clog.x.misc.log2,
396 	    sctp_clog.x.misc.log3,
397 	    sctp_clog.x.misc.log4);
398 }
399 
400 void
401 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
402 {
403 	struct sctp_cwnd_log sctp_clog;
404 
405 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
406 	sctp_clog.x.rwnd.send_size = flight_size;
407 	sctp_clog.x.rwnd.overhead = overhead;
408 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
409 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
410 	    SCTP_LOG_EVENT_RWND,
411 	    from,
412 	    sctp_clog.x.misc.log1,
413 	    sctp_clog.x.misc.log2,
414 	    sctp_clog.x.misc.log3,
415 	    sctp_clog.x.misc.log4);
416 }
417 
418 void
419 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
420 {
421 	struct sctp_cwnd_log sctp_clog;
422 
423 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
424 	sctp_clog.x.mbcnt.size_change = book;
425 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
426 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
427 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
428 	    SCTP_LOG_EVENT_MBCNT,
429 	    from,
430 	    sctp_clog.x.misc.log1,
431 	    sctp_clog.x.misc.log2,
432 	    sctp_clog.x.misc.log3,
433 	    sctp_clog.x.misc.log4);
434 }
435 
436 void
437 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
438 {
439 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
440 	    SCTP_LOG_MISC_EVENT,
441 	    from,
442 	    a, b, c, d);
443 }
444 
445 void
446 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
447 {
448 	struct sctp_cwnd_log sctp_clog;
449 
450 	sctp_clog.x.wake.stcb = (void *)stcb;
451 	sctp_clog.x.wake.wake_cnt = wake_cnt;
452 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
453 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
454 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
455 
456 	if (stcb->asoc.stream_queue_cnt < 0xff)
457 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
458 	else
459 		sctp_clog.x.wake.stream_qcnt = 0xff;
460 
461 	if (stcb->asoc.chunks_on_out_queue < 0xff)
462 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
463 	else
464 		sctp_clog.x.wake.chunks_on_oque = 0xff;
465 
466 	sctp_clog.x.wake.sctpflags = 0;
467 	/* set in the defered mode stuff */
468 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
469 		sctp_clog.x.wake.sctpflags |= 1;
470 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
471 		sctp_clog.x.wake.sctpflags |= 2;
472 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
473 		sctp_clog.x.wake.sctpflags |= 4;
474 	/* what about the sb */
475 	if (stcb->sctp_socket) {
476 		struct socket *so = stcb->sctp_socket;
477 
478 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
479 	} else {
480 		sctp_clog.x.wake.sbflags = 0xff;
481 	}
482 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
483 	    SCTP_LOG_EVENT_WAKE,
484 	    from,
485 	    sctp_clog.x.misc.log1,
486 	    sctp_clog.x.misc.log2,
487 	    sctp_clog.x.misc.log3,
488 	    sctp_clog.x.misc.log4);
489 }
490 
491 void
492 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
493 {
494 	struct sctp_cwnd_log sctp_clog;
495 
496 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
497 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
498 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
499 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
500 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
501 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
502 	sctp_clog.x.blk.sndlen = sendlen;
503 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
504 	    SCTP_LOG_EVENT_BLOCK,
505 	    from,
506 	    sctp_clog.x.misc.log1,
507 	    sctp_clog.x.misc.log2,
508 	    sctp_clog.x.misc.log3,
509 	    sctp_clog.x.misc.log4);
510 }
511 
512 int
513 sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
514 {
515 	/* May need to fix this if ktrdump does not work */
516 	return (0);
517 }
518 
519 #ifdef SCTP_AUDITING_ENABLED
/* Ring buffer of (event, detail) byte pairs recorded by the auditing code. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next slot to write in sctp_audit_data; wraps at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
522 
/*
 * Dump the audit ring buffer to the console in chronological order:
 * first the entries from the current index to the end, then the wrapped
 * portion from the start.  The markers 0xe0/0x01, 0xf0, and 0xc0/0x01
 * force a line break; otherwise output wraps every 14 pairs.  Note that
 * the pair counter `cnt` deliberately carries over from the first half
 * into the second so line wrapping stays continuous across the wrap.
 */
static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
		    (uint32_t) sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	/* wrapped half of the ring */
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
		    (uint32_t) sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}
571 
/*
 * Consistency-check the association's retransmit and flight-size
 * accounting against the actual contents of the sent queue, recording
 * audit events (0xAA entry marker, 0xAF failures, 0xA1/0xA2 counter
 * snapshots) in the audit ring.  On any mismatch the counters are
 * CORRECTED in place and a full audit report is printed.
 * The `net` parameter is currently unused.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* entry marker: 0xAA plus the caller's "from" code */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: called without an endpoint */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: called without an association */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: snapshot of the current retransmit count (low byte) */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	/* Re-derive retransmit count, flight bytes and flight count
	 * from the sent queue itself. */
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			/* still outstanding (in flight) */
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retran count mismatch — correct it */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: flight-byte mismatch — correct it */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: flight-count mismatch — correct it */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: sum of per-net flight sizes must equal the
	 * association total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight mismatch */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
701 
702 void
703 sctp_audit_log(uint8_t ev, uint8_t fd)
704 {
705 
706 	sctp_audit_data[sctp_audit_indx][0] = ev;
707 	sctp_audit_data[sctp_audit_indx][1] = fd;
708 	sctp_audit_indx++;
709 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
710 		sctp_audit_indx = 0;
711 	}
712 }
713 
714 #endif
715 
716 /*
717  * sctp_stop_timers_for_shutdown() should be called
718  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
719  * state to make sure that all timers are stopped.
720  */
721 void
722 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
723 {
724 	struct sctp_association *asoc;
725 	struct sctp_nets *net;
726 
727 	asoc = &stcb->asoc;
728 
729 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
730 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
731 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
732 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
733 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
734 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
735 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
736 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
737 	}
738 }
739 
740 /*
741  * a list of sizes based on typical mtu's, used only if next hop size not
742  * returned.
743  */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest table entry strictly smaller than val.  If val is
 * not larger than the smallest entry, val is returned unchanged.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	/* Scan downward for the first entry below val. */
	i = sizeof(sctp_mtu_sizes) / sizeof(uint32_t);
	while (--i > 0) {
		if (sctp_mtu_sizes[i] < val) {
			break;
		}
	}
	return (sctp_mtu_sizes[i]);
}

/*
 * Return the smallest table entry strictly larger than val.  If every
 * entry is <= val, val is returned unchanged.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	uint32_t i, cnt;

	cnt = sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]);
	for (i = 0; i < cnt; i++) {
		if (sctp_mtu_sizes[i] > val) {
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}
802 
/*
 * Refill the endpoint's random_store by HMAC'ing the endpoint's random
 * seed with a monotonically increasing counter, then reset store_at so
 * consumers start reading from the beginning.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
821 
/*
 * Return a 32-bit pseudo-random value for use as an initial TSN,
 * consuming 4 bytes from the endpoint's random_store.  The store index
 * is advanced with a lock-free compare-and-swap; the store is refilled
 * when the index wraps.  If initial_sequence_debug is set, a simple
 * incrementing counter is returned instead (debug aid).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* wrap before the last 3 bytes so a full uint32_t always fits */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/*
	 * NOTE(review): this reads random_store through a uint32_t cast —
	 * presumably store_at is always 4-aligned so the access is safe;
	 * also a concurrent refill could race with this read, which the
	 * design tolerates (see sctp_fill_random_store()).
	 */
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
859 
860 uint32_t
861 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
862 {
863 	uint32_t x;
864 	struct timeval now;
865 
866 	if (check) {
867 		(void)SCTP_GETTIME_TIMEVAL(&now);
868 	}
869 	for (;;) {
870 		x = sctp_select_initial_TSN(&inp->sctp_ep);
871 		if (x == 0) {
872 			/* we never use 0 */
873 			continue;
874 		}
875 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
876 			break;
877 		}
878 	}
879 	return (x);
880 }
881 
882 int
883 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
884     uint32_t override_tag, uint32_t vrf_id)
885 {
886 	struct sctp_association *asoc;
887 
888 	/*
889 	 * Anything set to zero is taken care of by the allocation routine's
890 	 * bzero
891 	 */
892 
893 	/*
894 	 * Up front select what scoping to apply on addresses I tell my peer
895 	 * Not sure what to do with these right now, we will need to come up
896 	 * with a way to set them. We may need to pass them through from the
897 	 * caller in the sctp_aloc_assoc() function.
898 	 */
899 	int i;
900 
901 	asoc = &stcb->asoc;
902 	/* init all variables to a known value. */
903 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
904 	asoc->max_burst = m->sctp_ep.max_burst;
905 	asoc->fr_max_burst = m->sctp_ep.fr_max_burst;
906 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
907 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
908 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
909 	asoc->ecn_allowed = m->sctp_ecn_enable;
910 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
911 	asoc->sctp_cmt_pf = (uint8_t) 0;
912 	asoc->sctp_frag_point = m->sctp_frag_point;
913 	asoc->sctp_features = m->sctp_features;
914 	asoc->default_dscp = m->sctp_ep.default_dscp;
915 #ifdef INET6
916 	if (m->sctp_ep.default_flowlabel) {
917 		asoc->default_flowlabel = m->sctp_ep.default_flowlabel;
918 	} else {
919 		if (m->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
920 			asoc->default_flowlabel = sctp_select_initial_TSN(&m->sctp_ep);
921 			asoc->default_flowlabel &= 0x000fffff;
922 			asoc->default_flowlabel |= 0x80000000;
923 		} else {
924 			asoc->default_flowlabel = 0;
925 		}
926 	}
927 #endif
928 	asoc->sb_send_resv = 0;
929 	if (override_tag) {
930 		asoc->my_vtag = override_tag;
931 	} else {
932 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
933 	}
934 	/* Get the nonce tags */
935 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
936 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
937 	asoc->vrf_id = vrf_id;
938 
939 #ifdef SCTP_ASOCLOG_OF_TSNS
940 	asoc->tsn_in_at = 0;
941 	asoc->tsn_out_at = 0;
942 	asoc->tsn_in_wrapped = 0;
943 	asoc->tsn_out_wrapped = 0;
944 	asoc->cumack_log_at = 0;
945 	asoc->cumack_log_atsnt = 0;
946 #endif
947 #ifdef SCTP_FS_SPEC_LOG
948 	asoc->fs_index = 0;
949 #endif
950 	asoc->refcnt = 0;
951 	asoc->assoc_up_sent = 0;
952 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
953 	    sctp_select_initial_TSN(&m->sctp_ep);
954 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
955 	/* we are optimisitic here */
956 	asoc->peer_supports_pktdrop = 1;
957 	asoc->peer_supports_nat = 0;
958 	asoc->sent_queue_retran_cnt = 0;
959 
960 	/* for CMT */
961 	asoc->last_net_cmt_send_started = NULL;
962 
963 	/* This will need to be adjusted */
964 	asoc->last_acked_seq = asoc->init_seq_number - 1;
965 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
966 	asoc->asconf_seq_in = asoc->last_acked_seq;
967 
968 	/* here we are different, we hold the next one we expect */
969 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
970 
971 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
972 	asoc->initial_rto = m->sctp_ep.initial_rto;
973 
974 	asoc->max_init_times = m->sctp_ep.max_init_times;
975 	asoc->max_send_times = m->sctp_ep.max_send_times;
976 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
977 	asoc->def_net_pf_threshold = m->sctp_ep.def_net_pf_threshold;
978 	asoc->free_chunk_cnt = 0;
979 
980 	asoc->iam_blocking = 0;
981 	asoc->context = m->sctp_context;
982 	asoc->local_strreset_support = m->local_strreset_support;
983 	asoc->def_send = m->def_send;
984 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
985 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
986 	asoc->pr_sctp_cnt = 0;
987 	asoc->total_output_queue_size = 0;
988 
989 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
990 		struct in6pcb *inp6;
991 
992 		/* Its a V6 socket */
993 		inp6 = (struct in6pcb *)m;
994 		asoc->ipv6_addr_legal = 1;
995 		/* Now look at the binding flag to see if V4 will be legal */
996 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
997 			asoc->ipv4_addr_legal = 1;
998 		} else {
999 			/* V4 addresses are NOT legal on the association */
1000 			asoc->ipv4_addr_legal = 0;
1001 		}
1002 	} else {
1003 		/* Its a V4 socket, no - V6 */
1004 		asoc->ipv4_addr_legal = 1;
1005 		asoc->ipv6_addr_legal = 0;
1006 	}
1007 
1008 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1009 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1010 
1011 	asoc->smallest_mtu = m->sctp_frag_point;
1012 	asoc->minrto = m->sctp_ep.sctp_minrto;
1013 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1014 
1015 	asoc->locked_on_sending = NULL;
1016 	asoc->stream_locked_on = 0;
1017 	asoc->ecn_echo_cnt_onq = 0;
1018 	asoc->stream_locked = 0;
1019 
1020 	asoc->send_sack = 1;
1021 
1022 	LIST_INIT(&asoc->sctp_restricted_addrs);
1023 
1024 	TAILQ_INIT(&asoc->nets);
1025 	TAILQ_INIT(&asoc->pending_reply_queue);
1026 	TAILQ_INIT(&asoc->asconf_ack_sent);
1027 	/* Setup to fill the hb random cache at first HB */
1028 	asoc->hb_random_idx = 4;
1029 
1030 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1031 
1032 	stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
1033 	stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
1034 
1035 	stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
1036 	stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
1037 
1038 	/*
1039 	 * Now the stream parameters, here we allocate space for all streams
1040 	 * that we request by default.
1041 	 */
1042 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1043 	    m->sctp_ep.pre_open_stream_count;
1044 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1045 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1046 	    SCTP_M_STRMO);
1047 	if (asoc->strmout == NULL) {
1048 		/* big trouble no memory */
1049 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1050 		return (ENOMEM);
1051 	}
1052 	for (i = 0; i < asoc->streamoutcnt; i++) {
1053 		/*
1054 		 * inbound side must be set to 0xffff, also NOTE when we get
1055 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1056 		 * count (streamoutcnt) but first check if we sent to any of
1057 		 * the upper streams that were dropped (if some were). Those
1058 		 * that were dropped must be notified to the upper layer as
1059 		 * failed to send.
1060 		 */
1061 		asoc->strmout[i].next_sequence_sent = 0x0;
1062 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1063 		asoc->strmout[i].stream_no = i;
1064 		asoc->strmout[i].last_msg_incomplete = 0;
1065 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1066 	}
1067 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1068 
1069 	/* Now the mapping array */
1070 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1071 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1072 	    SCTP_M_MAP);
1073 	if (asoc->mapping_array == NULL) {
1074 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1075 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1076 		return (ENOMEM);
1077 	}
1078 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1079 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1080 	    SCTP_M_MAP);
1081 	if (asoc->nr_mapping_array == NULL) {
1082 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1083 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1084 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1085 		return (ENOMEM);
1086 	}
1087 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1088 
1089 	/* Now the init of the other outqueues */
1090 	TAILQ_INIT(&asoc->free_chunks);
1091 	TAILQ_INIT(&asoc->control_send_queue);
1092 	TAILQ_INIT(&asoc->asconf_send_queue);
1093 	TAILQ_INIT(&asoc->send_queue);
1094 	TAILQ_INIT(&asoc->sent_queue);
1095 	TAILQ_INIT(&asoc->reasmqueue);
1096 	TAILQ_INIT(&asoc->resetHead);
1097 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1098 	TAILQ_INIT(&asoc->asconf_queue);
1099 	/* authentication fields */
1100 	asoc->authinfo.random = NULL;
1101 	asoc->authinfo.active_keyid = 0;
1102 	asoc->authinfo.assoc_key = NULL;
1103 	asoc->authinfo.assoc_keyid = 0;
1104 	asoc->authinfo.recv_key = NULL;
1105 	asoc->authinfo.recv_keyid = 0;
1106 	LIST_INIT(&asoc->shared_keys);
1107 	asoc->marked_retrans = 0;
1108 	asoc->port = m->sctp_ep.port;
1109 	asoc->timoinit = 0;
1110 	asoc->timodata = 0;
1111 	asoc->timosack = 0;
1112 	asoc->timoshutdown = 0;
1113 	asoc->timoheartbeat = 0;
1114 	asoc->timocookie = 0;
1115 	asoc->timoshutdownack = 0;
1116 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1117 	asoc->discontinuity_time = asoc->start_time;
1118 	/*
1119 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1120 	 * freed later when the association is freed.
1121 	 */
1122 	return (0);
1123 }
1124 
1125 void
1126 sctp_print_mapping_array(struct sctp_association *asoc)
1127 {
1128 	unsigned int i, limit;
1129 
1130 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1131 	    asoc->mapping_array_size,
1132 	    asoc->mapping_array_base_tsn,
1133 	    asoc->cumulative_tsn,
1134 	    asoc->highest_tsn_inside_map,
1135 	    asoc->highest_tsn_inside_nr_map);
1136 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1137 		if (asoc->mapping_array[limit - 1] != 0) {
1138 			break;
1139 		}
1140 	}
1141 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1142 	for (i = 0; i < limit; i++) {
1143 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1144 	}
1145 	if (limit % 16)
1146 		SCTP_PRINTF("\n");
1147 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1148 		if (asoc->nr_mapping_array[limit - 1]) {
1149 			break;
1150 		}
1151 	}
1152 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1153 	for (i = 0; i < limit; i++) {
1154 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1155 	}
1156 	if (limit % 16)
1157 		SCTP_PRINTF("\n");
1158 }
1159 
1160 int
1161 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1162 {
1163 	/* mapping array needs to grow */
1164 	uint8_t *new_array1, *new_array2;
1165 	uint32_t new_size;
1166 
1167 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1168 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1169 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1170 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1171 		/* can't get more, forget it */
1172 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1173 		if (new_array1) {
1174 			SCTP_FREE(new_array1, SCTP_M_MAP);
1175 		}
1176 		if (new_array2) {
1177 			SCTP_FREE(new_array2, SCTP_M_MAP);
1178 		}
1179 		return (-1);
1180 	}
1181 	memset(new_array1, 0, new_size);
1182 	memset(new_array2, 0, new_size);
1183 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1184 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1185 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1186 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1187 	asoc->mapping_array = new_array1;
1188 	asoc->nr_mapping_array = new_array2;
1189 	asoc->mapping_array_size = new_size;
1190 	return (0);
1191 }
1192 
1193 
/*
 * Core of the PCB/association iterator: walk every endpoint (inp) that
 * matches the iterator's pcb_flags/pcb_features filter and, within each
 * endpoint, every association (stcb) that matches asoc_state, invoking
 * the iterator's callbacks (function_inp per endpoint, function_assoc per
 * association, function_inp_end after an endpoint's last association,
 * function_atend when the whole walk finishes).  Entered with no locks
 * held; takes the INP-info read lock and the iterator lock for the
 * duration, and frees 'it' itself on completion.  Periodically drops all
 * locks (every SCTP_ITERATOR_MAX_AT_ONCE associations) so other threads
 * can make progress, honoring any stop request posted in
 * sctp_it_ctl.iterator_flags while the locks were released.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* Drop the reference sctp_initiate_iterator took on the start inp. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* The iterator owns itself; free it on the way out. */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* On the first pass it->inp is already read-locked from above. */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* Unlock the rejected inp only after stepping past it. */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint callback asked to skip, or no associations: wrap up this inp. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the current stcb and inp with references so
			 * they survive while every lock is released and
			 * reacquired below.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Reacquire in lock order and drop the pin references. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1341 
1342 void
1343 sctp_iterator_worker(void)
1344 {
1345 	struct sctp_iterator *it, *nit;
1346 
1347 	/* This function is called with the WQ lock in place */
1348 
1349 	sctp_it_ctl.iterator_running = 1;
1350 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1351 		sctp_it_ctl.cur_it = it;
1352 		/* now lets work on this one */
1353 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1354 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1355 		CURVNET_SET(it->vn);
1356 		sctp_iterator_work(it);
1357 		sctp_it_ctl.cur_it = NULL;
1358 		CURVNET_RESTORE();
1359 		SCTP_IPI_ITERATOR_WQ_LOCK();
1360 		/* sa_ignore FREED_MEMORY */
1361 	}
1362 	sctp_it_ctl.iterator_running = 0;
1363 	return;
1364 }
1365 
1366 
1367 static void
1368 sctp_handle_addr_wq(void)
1369 {
1370 	/* deal with the ADDR wq from the rtsock calls */
1371 	struct sctp_laddr *wi, *nwi;
1372 	struct sctp_asconf_iterator *asc;
1373 
1374 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1375 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1376 	if (asc == NULL) {
1377 		/* Try later, no memory */
1378 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1379 		    (struct sctp_inpcb *)NULL,
1380 		    (struct sctp_tcb *)NULL,
1381 		    (struct sctp_nets *)NULL);
1382 		return;
1383 	}
1384 	LIST_INIT(&asc->list_of_work);
1385 	asc->cnt = 0;
1386 
1387 	SCTP_WQ_ADDR_LOCK();
1388 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1389 		LIST_REMOVE(wi, sctp_nxt_addr);
1390 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1391 		asc->cnt++;
1392 	}
1393 	SCTP_WQ_ADDR_UNLOCK();
1394 
1395 	if (asc->cnt == 0) {
1396 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1397 	} else {
1398 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1399 		    sctp_asconf_iterator_stcb,
1400 		    NULL,	/* No ep end for boundall */
1401 		    SCTP_PCB_FLAGS_BOUNDALL,
1402 		    SCTP_PCB_ANY_FEATURES,
1403 		    SCTP_ASOC_ANY_STATE,
1404 		    (void *)asc, 0,
1405 		    sctp_asconf_iterator_end, NULL, 0);
1406 	}
1407 }
1408 
/*
 * Common callout entry point for every SCTP timer.  't' is the
 * struct sctp_timer embedded in the inp/stcb/net it was armed for.
 * Validates that the timer is still live (not stale, not for a dying
 * endpoint/association), takes the needed references and the TCB lock,
 * then dispatches on tmr->type to the per-timer handler.  Reference
 * counting is delicate here: 'inp' gets an INP ref and 'stcb' a refcnt
 * bump up front, released at out_decr/get_out — except for the ASOCKILL
 * and INPKILL cases, which consume the object and jump to out_no_decr.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	/* stopped_from breadcrumbs record where a stale timer bailed out. */
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Every timer type except ADDR_WQ requires an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/* Cache the type: the kill cases below may tear tmr's owner down. */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			/* Socket is gone and this timer type is not allowed to run then. */
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association; state 0 means it is already being freed. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while the callout was in flight. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/* Holding the TCB lock now; the extra refcnt is no longer needed. */
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			/* Association is dying; only ASOCKILL may still run. */
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped_from which timeout occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Drain the rtsock address work queue (no inp/stcb needed). */
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/* Per-destination heartbeat; 'net' selects the path. */
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			/* Re-arm and send unless HB was disabled on this path. */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* Rotate the endpoint's cookie secret keys. */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Shutdown took too long overall: abort the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Final teardown of a lingering association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		/* Final teardown of a lingering endpoint. */
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1849 
1850 void
1851 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1852     struct sctp_nets *net)
1853 {
1854 	uint32_t to_ticks;
1855 	struct sctp_timer *tmr;
1856 
1857 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1858 		return;
1859 
1860 	tmr = NULL;
1861 	if (stcb) {
1862 		SCTP_TCB_LOCK_ASSERT(stcb);
1863 	}
1864 	switch (t_type) {
1865 	case SCTP_TIMER_TYPE_ZERO_COPY:
1866 		tmr = &inp->sctp_ep.zero_copy_timer;
1867 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1868 		break;
1869 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1870 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1871 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1872 		break;
1873 	case SCTP_TIMER_TYPE_ADDR_WQ:
1874 		/* Only 1 tick away :-) */
1875 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1876 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1877 		break;
1878 	case SCTP_TIMER_TYPE_SEND:
1879 		/* Here we use the RTO timer */
1880 		{
1881 			int rto_val;
1882 
1883 			if ((stcb == NULL) || (net == NULL)) {
1884 				return;
1885 			}
1886 			tmr = &net->rxt_timer;
1887 			if (net->RTO == 0) {
1888 				rto_val = stcb->asoc.initial_rto;
1889 			} else {
1890 				rto_val = net->RTO;
1891 			}
1892 			to_ticks = MSEC_TO_TICKS(rto_val);
1893 		}
1894 		break;
1895 	case SCTP_TIMER_TYPE_INIT:
1896 		/*
1897 		 * Here we use the INIT timer default usually about 1
1898 		 * minute.
1899 		 */
1900 		if ((stcb == NULL) || (net == NULL)) {
1901 			return;
1902 		}
1903 		tmr = &net->rxt_timer;
1904 		if (net->RTO == 0) {
1905 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1906 		} else {
1907 			to_ticks = MSEC_TO_TICKS(net->RTO);
1908 		}
1909 		break;
1910 	case SCTP_TIMER_TYPE_RECV:
1911 		/*
1912 		 * Here we use the Delayed-Ack timer value from the inp
1913 		 * ususually about 200ms.
1914 		 */
1915 		if (stcb == NULL) {
1916 			return;
1917 		}
1918 		tmr = &stcb->asoc.dack_timer;
1919 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1920 		break;
1921 	case SCTP_TIMER_TYPE_SHUTDOWN:
1922 		/* Here we use the RTO of the destination. */
1923 		if ((stcb == NULL) || (net == NULL)) {
1924 			return;
1925 		}
1926 		if (net->RTO == 0) {
1927 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1928 		} else {
1929 			to_ticks = MSEC_TO_TICKS(net->RTO);
1930 		}
1931 		tmr = &net->rxt_timer;
1932 		break;
1933 	case SCTP_TIMER_TYPE_HEARTBEAT:
1934 		/*
1935 		 * the net is used here so that we can add in the RTO. Even
1936 		 * though we use a different timer. We also add the HB timer
1937 		 * PLUS a random jitter.
1938 		 */
1939 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1940 			return;
1941 		} else {
1942 			uint32_t rndval;
1943 			uint32_t jitter;
1944 
1945 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1946 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1947 				return;
1948 			}
1949 			if (net->RTO == 0) {
1950 				to_ticks = stcb->asoc.initial_rto;
1951 			} else {
1952 				to_ticks = net->RTO;
1953 			}
1954 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1955 			jitter = rndval % to_ticks;
1956 			if (jitter >= (to_ticks >> 1)) {
1957 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1958 			} else {
1959 				to_ticks = to_ticks - jitter;
1960 			}
1961 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1962 			    !(net->dest_state & SCTP_ADDR_PF)) {
1963 				to_ticks += net->heart_beat_delay;
1964 			}
1965 			/*
1966 			 * Now we must convert the to_ticks that are now in
1967 			 * ms to ticks.
1968 			 */
1969 			to_ticks = MSEC_TO_TICKS(to_ticks);
1970 			tmr = &net->hb_timer;
1971 		}
1972 		break;
1973 	case SCTP_TIMER_TYPE_COOKIE:
1974 		/*
1975 		 * Here we can use the RTO timer from the network since one
1976 		 * RTT was compelete. If a retran happened then we will be
1977 		 * using the RTO initial value.
1978 		 */
1979 		if ((stcb == NULL) || (net == NULL)) {
1980 			return;
1981 		}
1982 		if (net->RTO == 0) {
1983 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1984 		} else {
1985 			to_ticks = MSEC_TO_TICKS(net->RTO);
1986 		}
1987 		tmr = &net->rxt_timer;
1988 		break;
1989 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1990 		/*
1991 		 * nothing needed but the endpoint here ususually about 60
1992 		 * minutes.
1993 		 */
1994 		if (inp == NULL) {
1995 			return;
1996 		}
1997 		tmr = &inp->sctp_ep.signature_change;
1998 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1999 		break;
2000 	case SCTP_TIMER_TYPE_ASOCKILL:
2001 		if (stcb == NULL) {
2002 			return;
2003 		}
2004 		tmr = &stcb->asoc.strreset_timer;
2005 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2006 		break;
2007 	case SCTP_TIMER_TYPE_INPKILL:
2008 		/*
2009 		 * The inp is setup to die. We re-use the signature_chage
2010 		 * timer since that has stopped and we are in the GONE
2011 		 * state.
2012 		 */
2013 		if (inp == NULL) {
2014 			return;
2015 		}
2016 		tmr = &inp->sctp_ep.signature_change;
2017 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2018 		break;
2019 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2020 		/*
2021 		 * Here we use the value found in the EP for PMTU ususually
2022 		 * about 10 minutes.
2023 		 */
2024 		if ((stcb == NULL) || (inp == NULL)) {
2025 			return;
2026 		}
2027 		if (net == NULL) {
2028 			return;
2029 		}
2030 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2031 			return;
2032 		}
2033 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2034 		tmr = &net->pmtu_timer;
2035 		break;
2036 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2037 		/* Here we use the RTO of the destination */
2038 		if ((stcb == NULL) || (net == NULL)) {
2039 			return;
2040 		}
2041 		if (net->RTO == 0) {
2042 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2043 		} else {
2044 			to_ticks = MSEC_TO_TICKS(net->RTO);
2045 		}
2046 		tmr = &net->rxt_timer;
2047 		break;
2048 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2049 		/*
2050 		 * Here we use the endpoints shutdown guard timer usually
2051 		 * about 3 minutes.
2052 		 */
2053 		if ((inp == NULL) || (stcb == NULL)) {
2054 			return;
2055 		}
2056 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2057 		tmr = &stcb->asoc.shut_guard_timer;
2058 		break;
2059 	case SCTP_TIMER_TYPE_STRRESET:
2060 		/*
2061 		 * Here the timer comes from the stcb but its value is from
2062 		 * the net's RTO.
2063 		 */
2064 		if ((stcb == NULL) || (net == NULL)) {
2065 			return;
2066 		}
2067 		if (net->RTO == 0) {
2068 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2069 		} else {
2070 			to_ticks = MSEC_TO_TICKS(net->RTO);
2071 		}
2072 		tmr = &stcb->asoc.strreset_timer;
2073 		break;
2074 	case SCTP_TIMER_TYPE_ASCONF:
2075 		/*
2076 		 * Here the timer comes from the stcb but its value is from
2077 		 * the net's RTO.
2078 		 */
2079 		if ((stcb == NULL) || (net == NULL)) {
2080 			return;
2081 		}
2082 		if (net->RTO == 0) {
2083 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2084 		} else {
2085 			to_ticks = MSEC_TO_TICKS(net->RTO);
2086 		}
2087 		tmr = &stcb->asoc.asconf_timer;
2088 		break;
2089 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2090 		if ((stcb == NULL) || (net != NULL)) {
2091 			return;
2092 		}
2093 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2094 		tmr = &stcb->asoc.delete_prim_timer;
2095 		break;
2096 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2097 		if (stcb == NULL) {
2098 			return;
2099 		}
2100 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2101 			/*
2102 			 * Really an error since stcb is NOT set to
2103 			 * autoclose
2104 			 */
2105 			return;
2106 		}
2107 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2108 		tmr = &stcb->asoc.autoclose_timer;
2109 		break;
2110 	default:
2111 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2112 		    __FUNCTION__, t_type);
2113 		return;
2114 		break;
2115 	}
2116 	if ((to_ticks <= 0) || (tmr == NULL)) {
2117 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2118 		    __FUNCTION__, t_type, to_ticks, tmr);
2119 		return;
2120 	}
2121 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2122 		/*
2123 		 * we do NOT allow you to have it already running. if it is
2124 		 * we leave the current one up unchanged
2125 		 */
2126 		return;
2127 	}
2128 	/* At this point we can proceed */
2129 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2130 		stcb->asoc.num_send_timers_up++;
2131 	}
2132 	tmr->stopped_from = 0;
2133 	tmr->type = t_type;
2134 	tmr->ep = (void *)inp;
2135 	tmr->tcb = (void *)stcb;
2136 	tmr->net = (void *)net;
2137 	tmr->self = (void *)tmr;
2138 	tmr->vnet = (void *)curvnet;
2139 	tmr->ticks = sctp_get_tick_count();
2140 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2141 	return;
2142 }
2143 
/*
 * Stop (cancel) the timer of the given type.  The t_type -> timer-struct
 * mapping mirrors sctp_timer_start(); if the inp/stcb/net argument needed
 * for a particular type is NULL, the call is silently a no-op.  'from'
 * identifies the caller and is recorded in tmr->stopped_from for debugging.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Every type except ADDR_WQ requires a valid endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the timer structure that holds it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the count of outstanding SEND timers in sync (clamped at 0). */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2300 
2301 uint32_t
2302 sctp_calculate_len(struct mbuf *m)
2303 {
2304 	uint32_t tlen = 0;
2305 	struct mbuf *at;
2306 
2307 	at = m;
2308 	while (at) {
2309 		tlen += SCTP_BUF_LEN(at);
2310 		at = SCTP_BUF_NEXT(at);
2311 	}
2312 	return (tlen);
2313 }
2314 
2315 void
2316 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2317     struct sctp_association *asoc, uint32_t mtu)
2318 {
2319 	/*
2320 	 * Reset the P-MTU size on this association, this involves changing
2321 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2322 	 * allow the DF flag to be cleared.
2323 	 */
2324 	struct sctp_tmit_chunk *chk;
2325 	unsigned int eff_mtu, ovh;
2326 
2327 	asoc->smallest_mtu = mtu;
2328 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2329 		ovh = SCTP_MIN_OVERHEAD;
2330 	} else {
2331 		ovh = SCTP_MIN_V4_OVERHEAD;
2332 	}
2333 	eff_mtu = mtu - ovh;
2334 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2335 		if (chk->send_size > eff_mtu) {
2336 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2337 		}
2338 	}
2339 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2340 		if (chk->send_size > eff_mtu) {
2341 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2342 		}
2343 	}
2344 }
2345 
2346 
2347 /*
2348  * given an association and starting time of the current RTT period return
2349  * RTO in number of msecs net should point to the current network
2350  */
2351 
2352 uint32_t
2353 sctp_calculate_rto(struct sctp_tcb *stcb,
2354     struct sctp_association *asoc,
2355     struct sctp_nets *net,
2356     struct timeval *told,
2357     int safe, int rtt_from_sack)
2358 {
2359 	/*-
2360 	 * given an association and the starting time of the current RTT
2361 	 * period (in value1/value2) return RTO in number of msecs.
2362 	 */
2363 	int32_t rtt;		/* RTT in ms */
2364 	uint32_t new_rto;
2365 	int first_measure = 0;
2366 	struct timeval now, then, *old;
2367 
2368 	/* Copy it out for sparc64 */
2369 	if (safe == sctp_align_unsafe_makecopy) {
2370 		old = &then;
2371 		memcpy(&then, told, sizeof(struct timeval));
2372 	} else if (safe == sctp_align_safe_nocopy) {
2373 		old = told;
2374 	} else {
2375 		/* error */
2376 		SCTP_PRINTF("Huh, bad rto calc call\n");
2377 		return (0);
2378 	}
2379 	/************************/
2380 	/* 1. calculate new RTT */
2381 	/************************/
2382 	/* get the current time */
2383 	if (stcb->asoc.use_precise_time) {
2384 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2385 	} else {
2386 		(void)SCTP_GETTIME_TIMEVAL(&now);
2387 	}
2388 	timevalsub(&now, old);
2389 	/* store the current RTT in us */
2390 	net->rtt = (uint64_t) 10000000 *(uint64_t) now.tv_sec +
2391 	         (uint64_t) now.tv_usec;
2392 
2393 	/* computer rtt in ms */
2394 	rtt = net->rtt / 1000;
2395 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2396 		/*
2397 		 * Tell the CC module that a new update has just occurred
2398 		 * from a sack
2399 		 */
2400 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2401 	}
2402 	/*
2403 	 * Do we need to determine the lan? We do this only on sacks i.e.
2404 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2405 	 */
2406 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2407 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2408 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2409 			net->lan_type = SCTP_LAN_INTERNET;
2410 		} else {
2411 			net->lan_type = SCTP_LAN_LOCAL;
2412 		}
2413 	}
2414 	/***************************/
2415 	/* 2. update RTTVAR & SRTT */
2416 	/***************************/
2417 	/*-
2418 	 * Compute the scaled average lastsa and the
2419 	 * scaled variance lastsv as described in van Jacobson
2420 	 * Paper "Congestion Avoidance and Control", Annex A.
2421 	 *
2422 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2423 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2424 	 */
2425 	if (net->RTO_measured) {
2426 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2427 		net->lastsa += rtt;
2428 		if (rtt < 0) {
2429 			rtt = -rtt;
2430 		}
2431 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2432 		net->lastsv += rtt;
2433 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2434 			rto_logging(net, SCTP_LOG_RTTVAR);
2435 		}
2436 	} else {
2437 		/* First RTO measurment */
2438 		net->RTO_measured = 1;
2439 		first_measure = 1;
2440 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2441 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2442 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2443 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2444 		}
2445 	}
2446 	if (net->lastsv == 0) {
2447 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2448 	}
2449 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2450 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2451 	    (stcb->asoc.sat_network_lockout == 0)) {
2452 		stcb->asoc.sat_network = 1;
2453 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2454 		stcb->asoc.sat_network = 0;
2455 		stcb->asoc.sat_network_lockout = 1;
2456 	}
2457 	/* bound it, per C6/C7 in Section 5.3.1 */
2458 	if (new_rto < stcb->asoc.minrto) {
2459 		new_rto = stcb->asoc.minrto;
2460 	}
2461 	if (new_rto > stcb->asoc.maxrto) {
2462 		new_rto = stcb->asoc.maxrto;
2463 	}
2464 	/* we are now returning the RTO */
2465 	return (new_rto);
2466 }
2467 
2468 /*
2469  * return a pointer to a contiguous piece of data from the given mbuf chain
2470  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2471  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2472  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2473  */
2474 caddr_t
2475 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2476 {
2477 	uint32_t count;
2478 	uint8_t *ptr;
2479 
2480 	ptr = in_ptr;
2481 	if ((off < 0) || (len <= 0))
2482 		return (NULL);
2483 
2484 	/* find the desired start location */
2485 	while ((m != NULL) && (off > 0)) {
2486 		if (off < SCTP_BUF_LEN(m))
2487 			break;
2488 		off -= SCTP_BUF_LEN(m);
2489 		m = SCTP_BUF_NEXT(m);
2490 	}
2491 	if (m == NULL)
2492 		return (NULL);
2493 
2494 	/* is the current mbuf large enough (eg. contiguous)? */
2495 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2496 		return (mtod(m, caddr_t)+off);
2497 	} else {
2498 		/* else, it spans more than one mbuf, so save a temp copy... */
2499 		while ((m != NULL) && (len > 0)) {
2500 			count = min(SCTP_BUF_LEN(m) - off, len);
2501 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2502 			len -= count;
2503 			ptr += count;
2504 			off = 0;
2505 			m = SCTP_BUF_NEXT(m);
2506 		}
2507 		if ((m == NULL) && (len > 0))
2508 			return (NULL);
2509 		else
2510 			return ((caddr_t)in_ptr);
2511 	}
2512 }
2513 
2514 
2515 
2516 struct sctp_paramhdr *
2517 sctp_get_next_param(struct mbuf *m,
2518     int offset,
2519     struct sctp_paramhdr *pull,
2520     int pull_limit)
2521 {
2522 	/* This just provides a typed signature to Peter's Pull routine */
2523 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2524 	    (uint8_t *) pull));
2525 }
2526 
2527 
2528 int
2529 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2530 {
2531 	/*
2532 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2533 	 * padlen is > 3 this routine will fail.
2534 	 */
2535 	uint8_t *dp;
2536 	int i;
2537 
2538 	if (padlen > 3) {
2539 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2540 		return (ENOBUFS);
2541 	}
2542 	if (padlen <= M_TRAILINGSPACE(m)) {
2543 		/*
2544 		 * The easy way. We hope the majority of the time we hit
2545 		 * here :)
2546 		 */
2547 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2548 		SCTP_BUF_LEN(m) += padlen;
2549 	} else {
2550 		/* Hard way we must grow the mbuf */
2551 		struct mbuf *tmp;
2552 
2553 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2554 		if (tmp == NULL) {
2555 			/* Out of space GAK! we are in big trouble. */
2556 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2557 			return (ENOBUFS);
2558 		}
2559 		/* setup and insert in middle */
2560 		SCTP_BUF_LEN(tmp) = padlen;
2561 		SCTP_BUF_NEXT(tmp) = NULL;
2562 		SCTP_BUF_NEXT(m) = tmp;
2563 		dp = mtod(tmp, uint8_t *);
2564 	}
2565 	/* zero out the pad */
2566 	for (i = 0; i < padlen; i++) {
2567 		*dp = 0;
2568 		dp++;
2569 	}
2570 	return (0);
2571 }
2572 
2573 int
2574 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2575 {
2576 	/* find the last mbuf in chain and pad it */
2577 	struct mbuf *m_at;
2578 
2579 	m_at = m;
2580 	if (last_mbuf) {
2581 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2582 	} else {
2583 		while (m_at) {
2584 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2585 				return (sctp_add_pad_tombuf(m_at, padval));
2586 			}
2587 			m_at = SCTP_BUF_NEXT(m_at);
2588 		}
2589 	}
2590 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2591 	return (EFAULT);
2592 }
2593 
/*
 * Queue an SCTP_ASSOC_CHANGE notification (state 'event', cause 'error')
 * on the association's socket receive buffer.  For TCP-model (or
 * connected UDP-model) sockets, COMM_LOST/CANT_STR_ASSOC additionally
 * sets so_error and wakes any sleepers so blocked callers see the
 * failure.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;
	unsigned int i;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		/* COOKIE_WAIT means the setup never completed -> refused */
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Socket lock must be taken before the TCB lock: hold a
		 * refcount, drop the TCB lock, take the socket lock, then
		 * re-take the TCB lock.  Bail if the socket closed while
		 * we were unlocked.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		socantrcvmore(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	/* Append one sac_info byte per feature the peer supports. */
	i = 0;
	if (stcb->asoc.peer_supports_prsctp) {
		sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
	}
	if (stcb->asoc.peer_supports_auth) {
		sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
	}
	if (stcb->asoc.peer_supports_asconf) {
		sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
	}
	sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
	if (stcb->asoc.peer_supports_strreset) {
		sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
	}
	/* grow the reported length by the number of sac_info bytes used */
	sac->sac_length += i;
	SCTP_BUF_LEN(m_notify) = sac->sac_length;
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* same ref/unlock/relock dance as above for lock ordering */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2727 
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for address 'sa' with the
 * given state and error cause on the association's socket receive
 * buffer.  No-op if the RECVPADDREVNT feature is disabled.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* copy the affected address into the notification */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
2805 
2806 
/*
 * Queue a send-failed notification for the given transmit chunk, using
 * either the new sctp_send_failed_event format (if RECVNSENDFAILEVNT is
 * on) or the older sctp_send_failed format.  The chunk's user data (with
 * the SCTP data-chunk header trimmed off) is appended to the
 * notification, and chk->data ownership moves to the notification mbuf.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* reported length = header + user data (minus the chunk header) */
	length += chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		else
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		else
			ssf->ssf_flags = SCTP_DATA_SENT;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2915 
2916 
2917 static void
2918 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
2919     struct sctp_stream_queue_pending *sp, int so_locked
2920 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2921     SCTP_UNUSED
2922 #endif
2923 )
2924 {
2925 	struct mbuf *m_notify;
2926 	struct sctp_send_failed *ssf;
2927 	struct sctp_send_failed_event *ssfe;
2928 	struct sctp_queued_to_read *control;
2929 	int length;
2930 
2931 	if ((stcb == NULL) ||
2932 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2933 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2934 		/* event not enabled */
2935 		return;
2936 	}
2937 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2938 		length = sizeof(struct sctp_send_failed_event);
2939 	} else {
2940 		length = sizeof(struct sctp_send_failed);
2941 	}
2942 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_DONTWAIT, 1, MT_DATA);
2943 	if (m_notify == NULL) {
2944 		/* no space left */
2945 		return;
2946 	}
2947 	length += sp->length;
2948 	SCTP_BUF_LEN(m_notify) = 0;
2949 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2950 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2951 		ssfe->ssfe_type = SCTP_SEND_FAILED;
2952 		if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
2953 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2954 		else
2955 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2956 		ssfe->ssfe_length = length;
2957 		ssfe->ssfe_error = error;
2958 		/* not exactly what the user sent in, but should be close :) */
2959 		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
2960 		ssfe->ssfe_info.snd_sid = sp->stream;
2961 		if (sp->some_taken) {
2962 			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
2963 		} else {
2964 			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
2965 		}
2966 		ssfe->ssfe_info.snd_ppid = sp->ppid;
2967 		ssfe->ssfe_info.snd_context = sp->context;
2968 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2969 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2970 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
2971 	} else {
2972 		ssf = mtod(m_notify, struct sctp_send_failed *);
2973 		ssf->ssf_type = SCTP_SEND_FAILED;
2974 		if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
2975 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2976 		else
2977 			ssf->ssf_flags = SCTP_DATA_SENT;
2978 		ssf->ssf_length = length;
2979 		ssf->ssf_error = error;
2980 		/* not exactly what the user sent in, but should be close :) */
2981 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
2982 		ssf->ssf_info.sinfo_stream = sp->stream;
2983 		ssf->ssf_info.sinfo_ssn = sp->strseq;
2984 		if (sp->some_taken) {
2985 			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
2986 		} else {
2987 			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
2988 		}
2989 		ssf->ssf_info.sinfo_ppid = sp->ppid;
2990 		ssf->ssf_info.sinfo_context = sp->context;
2991 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2992 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2993 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2994 	}
2995 	SCTP_BUF_NEXT(m_notify) = sp->data;
2996 
2997 	/* Steal off the mbuf */
2998 	sp->data = NULL;
2999 	/*
3000 	 * For this case, we check the actual socket buffer, since the assoc
3001 	 * is going away we don't want to overfill the socket buffer for a
3002 	 * non-reader
3003 	 */
3004 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3005 		sctp_m_freem(m_notify);
3006 		return;
3007 	}
3008 	/* append to socket */
3009 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3010 	    0, 0, stcb->asoc.context, 0, 0, 0,
3011 	    m_notify);
3012 	if (control == NULL) {
3013 		/* no memory */
3014 		sctp_m_freem(m_notify);
3015 		return;
3016 	}
3017 	control->spec_flags = M_NOTIFICATION;
3018 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3019 	    control,
3020 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3021 }
3022 
3023 
3024 
3025 static void
3026 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3027 {
3028 	struct mbuf *m_notify;
3029 	struct sctp_adaptation_event *sai;
3030 	struct sctp_queued_to_read *control;
3031 
3032 	if ((stcb == NULL) ||
3033 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3034 		/* event not enabled */
3035 		return;
3036 	}
3037 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3038 	if (m_notify == NULL)
3039 		/* no space left */
3040 		return;
3041 	SCTP_BUF_LEN(m_notify) = 0;
3042 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3043 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3044 	sai->sai_flags = 0;
3045 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3046 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3047 	sai->sai_assoc_id = sctp_get_associd(stcb);
3048 
3049 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3050 	SCTP_BUF_NEXT(m_notify) = NULL;
3051 
3052 	/* append to socket */
3053 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3054 	    0, 0, stcb->asoc.context, 0, 0, 0,
3055 	    m_notify);
3056 	if (control == NULL) {
3057 		/* no memory */
3058 		sctp_m_freem(m_notify);
3059 		return;
3060 	}
3061 	control->length = SCTP_BUF_LEN(m_notify);
3062 	control->spec_flags = M_NOTIFICATION;
3063 	/* not that we need this */
3064 	control->tail_mbuf = m_notify;
3065 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3066 	    control,
3067 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3068 }
3069 
3070 /* This always must be called with the read-queue LOCKED in the INP */
3071 static void
3072 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3073     uint32_t val, int so_locked
3074 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3075     SCTP_UNUSED
3076 #endif
3077 )
3078 {
3079 	struct mbuf *m_notify;
3080 	struct sctp_pdapi_event *pdapi;
3081 	struct sctp_queued_to_read *control;
3082 	struct sockbuf *sb;
3083 
3084 	if ((stcb == NULL) ||
3085 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3086 		/* event not enabled */
3087 		return;
3088 	}
3089 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3090 		return;
3091 	}
3092 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3093 	if (m_notify == NULL)
3094 		/* no space left */
3095 		return;
3096 	SCTP_BUF_LEN(m_notify) = 0;
3097 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3098 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3099 	pdapi->pdapi_flags = 0;
3100 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3101 	pdapi->pdapi_indication = error;
3102 	pdapi->pdapi_stream = (val >> 16);
3103 	pdapi->pdapi_seq = (val & 0x0000ffff);
3104 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3105 
3106 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3107 	SCTP_BUF_NEXT(m_notify) = NULL;
3108 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3109 	    0, 0, stcb->asoc.context, 0, 0, 0,
3110 	    m_notify);
3111 	if (control == NULL) {
3112 		/* no memory */
3113 		sctp_m_freem(m_notify);
3114 		return;
3115 	}
3116 	control->spec_flags = M_NOTIFICATION;
3117 	control->length = SCTP_BUF_LEN(m_notify);
3118 	/* not that we need this */
3119 	control->tail_mbuf = m_notify;
3120 	control->held_length = 0;
3121 	control->length = 0;
3122 	sb = &stcb->sctp_socket->so_rcv;
3123 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3124 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3125 	}
3126 	sctp_sballoc(stcb, sb, m_notify);
3127 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3128 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3129 	}
3130 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3131 	control->end_added = 1;
3132 	if (stcb->asoc.control_pdapi)
3133 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3134 	else {
3135 		/* we really should not see this case */
3136 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3137 	}
3138 	if (stcb->sctp_ep && stcb->sctp_socket) {
3139 		/* This should always be the case */
3140 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3141 		struct socket *so;
3142 
3143 		so = SCTP_INP_SO(stcb->sctp_ep);
3144 		if (!so_locked) {
3145 			atomic_add_int(&stcb->asoc.refcnt, 1);
3146 			SCTP_TCB_UNLOCK(stcb);
3147 			SCTP_SOCKET_LOCK(so, 1);
3148 			SCTP_TCB_LOCK(stcb);
3149 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3150 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3151 				SCTP_SOCKET_UNLOCK(so, 1);
3152 				return;
3153 			}
3154 		}
3155 #endif
3156 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3157 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3158 		if (!so_locked) {
3159 			SCTP_SOCKET_UNLOCK(so, 1);
3160 		}
3161 #endif
3162 	}
3163 }
3164 
/*
 * Handle a completed SHUTDOWN from the peer: for connected (1-to-1 style)
 * sockets mark the socket as unable to send, then queue an
 * SCTP_SHUTDOWN_EVENT notification if the user subscribed to it.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Acquire the socket lock in the required order: drop the
		 * TCB lock first while holding a refcount so the
		 * association cannot be freed underneath us.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket was closed while the locks were dropped */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* build the SCTP_SHUTDOWN_EVENT notification */
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3232 
3233 static void
3234 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3235     int so_locked
3236 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3237     SCTP_UNUSED
3238 #endif
3239 )
3240 {
3241 	struct mbuf *m_notify;
3242 	struct sctp_sender_dry_event *event;
3243 	struct sctp_queued_to_read *control;
3244 
3245 	if ((stcb == NULL) ||
3246 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3247 		/* event not enabled */
3248 		return;
3249 	}
3250 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3251 	if (m_notify == NULL) {
3252 		/* no space left */
3253 		return;
3254 	}
3255 	SCTP_BUF_LEN(m_notify) = 0;
3256 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3257 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3258 	event->sender_dry_flags = 0;
3259 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3260 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3261 
3262 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3263 	SCTP_BUF_NEXT(m_notify) = NULL;
3264 
3265 	/* append to socket */
3266 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3267 	    0, 0, stcb->asoc.context, 0, 0, 0,
3268 	    m_notify);
3269 	if (control == NULL) {
3270 		/* no memory */
3271 		sctp_m_freem(m_notify);
3272 		return;
3273 	}
3274 	control->length = SCTP_BUF_LEN(m_notify);
3275 	control->spec_flags = M_NOTIFICATION;
3276 	/* not that we need this */
3277 	control->tail_mbuf = m_notify;
3278 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3279 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3280 }
3281 
3282 
3283 void
3284 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3285 {
3286 	struct mbuf *m_notify;
3287 	struct sctp_queued_to_read *control;
3288 	struct sctp_stream_change_event *stradd;
3289 	int len;
3290 
3291 	if ((stcb == NULL) ||
3292 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3293 		/* event not enabled */
3294 		return;
3295 	}
3296 	if ((stcb->asoc.peer_req_out) && flag) {
3297 		/* Peer made the request, don't tell the local user */
3298 		stcb->asoc.peer_req_out = 0;
3299 		return;
3300 	}
3301 	stcb->asoc.peer_req_out = 0;
3302 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3303 	if (m_notify == NULL)
3304 		/* no space left */
3305 		return;
3306 	SCTP_BUF_LEN(m_notify) = 0;
3307 	len = sizeof(struct sctp_stream_change_event);
3308 	if (len > M_TRAILINGSPACE(m_notify)) {
3309 		/* never enough room */
3310 		sctp_m_freem(m_notify);
3311 		return;
3312 	}
3313 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3314 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3315 	stradd->strchange_flags = flag;
3316 	stradd->strchange_length = len;
3317 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3318 	stradd->strchange_instrms = numberin;
3319 	stradd->strchange_outstrms = numberout;
3320 	SCTP_BUF_LEN(m_notify) = len;
3321 	SCTP_BUF_NEXT(m_notify) = NULL;
3322 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3323 		/* no space */
3324 		sctp_m_freem(m_notify);
3325 		return;
3326 	}
3327 	/* append to socket */
3328 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3329 	    0, 0, stcb->asoc.context, 0, 0, 0,
3330 	    m_notify);
3331 	if (control == NULL) {
3332 		/* no memory */
3333 		sctp_m_freem(m_notify);
3334 		return;
3335 	}
3336 	control->spec_flags = M_NOTIFICATION;
3337 	control->length = SCTP_BUF_LEN(m_notify);
3338 	/* not that we need this */
3339 	control->tail_mbuf = m_notify;
3340 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3341 	    control,
3342 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3343 }
3344 
3345 void
3346 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3347 {
3348 	struct mbuf *m_notify;
3349 	struct sctp_queued_to_read *control;
3350 	struct sctp_assoc_reset_event *strasoc;
3351 	int len;
3352 
3353 	if ((stcb == NULL) ||
3354 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3355 		/* event not enabled */
3356 		return;
3357 	}
3358 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3359 	if (m_notify == NULL)
3360 		/* no space left */
3361 		return;
3362 	SCTP_BUF_LEN(m_notify) = 0;
3363 	len = sizeof(struct sctp_assoc_reset_event);
3364 	if (len > M_TRAILINGSPACE(m_notify)) {
3365 		/* never enough room */
3366 		sctp_m_freem(m_notify);
3367 		return;
3368 	}
3369 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3370 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3371 	strasoc->assocreset_flags = flag;
3372 	strasoc->assocreset_length = len;
3373 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3374 	strasoc->assocreset_local_tsn = sending_tsn;
3375 	strasoc->assocreset_remote_tsn = recv_tsn;
3376 	SCTP_BUF_LEN(m_notify) = len;
3377 	SCTP_BUF_NEXT(m_notify) = NULL;
3378 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3379 		/* no space */
3380 		sctp_m_freem(m_notify);
3381 		return;
3382 	}
3383 	/* append to socket */
3384 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3385 	    0, 0, stcb->asoc.context, 0, 0, 0,
3386 	    m_notify);
3387 	if (control == NULL) {
3388 		/* no memory */
3389 		sctp_m_freem(m_notify);
3390 		return;
3391 	}
3392 	control->spec_flags = M_NOTIFICATION;
3393 	control->length = SCTP_BUF_LEN(m_notify);
3394 	/* not that we need this */
3395 	control->tail_mbuf = m_notify;
3396 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3397 	    control,
3398 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3399 }
3400 
3401 
3402 
/*
 * Queue an SCTP_STREAM_RESET_EVENT notification, optionally carrying the
 * list of affected stream ids, if the user subscribed to stream reset
 * events.
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	/* the notification is variable-length: header plus the stream list */
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	strreset->strreset_flags = flag;
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		/*
		 * NOTE(review): ntohs() implies 'list' holds the stream
		 * ids in network byte order here - confirm against the
		 * callers of sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_*).
		 */
		for (i = 0; i < number_entries; i++) {
			strreset->strreset_stream_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3464 
3465 
3466 void
3467 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3468     uint32_t error, void *data, int so_locked
3469 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3470     SCTP_UNUSED
3471 #endif
3472 )
3473 {
3474 	if ((stcb == NULL) ||
3475 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3476 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3477 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3478 		/* If the socket is gone we are out of here */
3479 		return;
3480 	}
3481 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3482 		return;
3483 	}
3484 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3485 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3486 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3487 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3488 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3489 			/* Don't report these in front states */
3490 			return;
3491 		}
3492 	}
3493 	switch (notification) {
3494 	case SCTP_NOTIFY_ASSOC_UP:
3495 		if (stcb->asoc.assoc_up_sent == 0) {
3496 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, so_locked);
3497 			stcb->asoc.assoc_up_sent = 1;
3498 		}
3499 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3500 			sctp_notify_adaptation_layer(stcb);
3501 		}
3502 		if (stcb->asoc.peer_supports_auth == 0) {
3503 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3504 			    NULL, so_locked);
3505 		}
3506 		break;
3507 	case SCTP_NOTIFY_ASSOC_DOWN:
3508 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, so_locked);
3509 		break;
3510 	case SCTP_NOTIFY_INTERFACE_DOWN:
3511 		{
3512 			struct sctp_nets *net;
3513 
3514 			net = (struct sctp_nets *)data;
3515 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3516 			    (struct sockaddr *)&net->ro._l_addr, error);
3517 			break;
3518 		}
3519 	case SCTP_NOTIFY_INTERFACE_UP:
3520 		{
3521 			struct sctp_nets *net;
3522 
3523 			net = (struct sctp_nets *)data;
3524 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3525 			    (struct sockaddr *)&net->ro._l_addr, error);
3526 			break;
3527 		}
3528 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3529 		{
3530 			struct sctp_nets *net;
3531 
3532 			net = (struct sctp_nets *)data;
3533 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3534 			    (struct sockaddr *)&net->ro._l_addr, error);
3535 			break;
3536 		}
3537 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3538 		sctp_notify_send_failed2(stcb, error,
3539 		    (struct sctp_stream_queue_pending *)data, so_locked);
3540 		break;
3541 	case SCTP_NOTIFY_DG_FAIL:
3542 		sctp_notify_send_failed(stcb, error,
3543 		    (struct sctp_tmit_chunk *)data, so_locked);
3544 		break;
3545 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3546 		{
3547 			uint32_t val;
3548 
3549 			val = *((uint32_t *) data);
3550 
3551 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3552 			break;
3553 		}
3554 	case SCTP_NOTIFY_STRDATA_ERR:
3555 		break;
3556 	case SCTP_NOTIFY_ASSOC_ABORTED:
3557 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3558 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3559 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, so_locked);
3560 		} else {
3561 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, so_locked);
3562 		}
3563 		break;
3564 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3565 		break;
3566 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3567 		break;
3568 	case SCTP_NOTIFY_ASSOC_RESTART:
3569 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, so_locked);
3570 		if (stcb->asoc.peer_supports_auth == 0) {
3571 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3572 			    NULL, so_locked);
3573 		}
3574 		break;
3575 	case SCTP_NOTIFY_HB_RESP:
3576 		break;
3577 	case SCTP_NOTIFY_STR_RESET_SEND:
3578 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3579 		break;
3580 	case SCTP_NOTIFY_STR_RESET_RECV:
3581 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3582 		break;
3583 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3584 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3585 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3586 		break;
3587 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3588 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3589 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3590 		break;
3591 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3592 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3593 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3594 		break;
3595 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3596 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3597 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3598 		break;
3599 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3600 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3601 		    error);
3602 		break;
3603 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3604 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3605 		    error);
3606 		break;
3607 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3608 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3609 		    error);
3610 		break;
3611 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3612 		break;
3613 	case SCTP_NOTIFY_ASCONF_FAILED:
3614 		break;
3615 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3616 		sctp_notify_shutdown_event(stcb);
3617 		break;
3618 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3619 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3620 		    (uint16_t) (uintptr_t) data,
3621 		    so_locked);
3622 		break;
3623 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3624 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3625 		    (uint16_t) (uintptr_t) data,
3626 		    so_locked);
3627 		break;
3628 	case SCTP_NOTIFY_NO_PEER_AUTH:
3629 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3630 		    (uint16_t) (uintptr_t) data,
3631 		    so_locked);
3632 		break;
3633 	case SCTP_NOTIFY_SENDER_DRY:
3634 		sctp_notify_sender_dry_event(stcb, so_locked);
3635 		break;
3636 	default:
3637 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3638 		    __FUNCTION__, notification, notification);
3639 		break;
3640 	}			/* end switch */
3641 }
3642 
/*
 * Flush every chunk still queued for transmission on this association
 * (the sent queue, the send queue and all per-stream output queues),
 * notifying the ULP of each failed datagram and releasing the mbufs and
 * chunk/stream-queue resources.  Called when the association is being
 * aborted.  'holds_lock' indicates whether the caller already holds the
 * TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* these chunks were put on the wire, report as SENT */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
			/* the notify may have taken ownership; re-check */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* never made it to the wire, report as UNSENT */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
			/* the notify may have taken ownership; re-check */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
				/* the notify may have taken ownership; re-check */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3736 
3737 void
3738 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3739 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3740     SCTP_UNUSED
3741 #endif
3742 )
3743 {
3744 	if (stcb == NULL) {
3745 		return;
3746 	}
3747 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3748 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3749 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3750 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3751 	}
3752 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3753 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3754 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3755 		return;
3756 	}
3757 	/* Tell them we lost the asoc */
3758 	sctp_report_all_outbound(stcb, 1, so_locked);
3759 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3760 }
3761 
/*
 * Abort an association in response to an inbound packet: notify the
 * local user (when a TCB exists), send an ABORT back using the peer's
 * vtag, then free the association.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Acquire the socket lock in the required order: drop the
		 * TCB lock while holding a refcount so the association
		 * cannot be freed underneath us.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* established associations leave the "currestab" gauge */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3805 
3806 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the inbound and outbound TSN tracking logs for an
 * association.  Only compiled in under SCTP_ASOCLOG_OF_TSNS, and the
 * body is additionally gated on NOSIY_PRINTS.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
	/*
	 * NOTE(review): "NOSIY_PRINTS" looks like a typo of "NOISY_PRINTS";
	 * as written, this body is dead unless NOSIY_PRINTS is defined.
	 * Renaming it would change which build flag enables the output,
	 * so it is flagged here rather than changed.
	 */
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* the log is circular: print the wrapped tail first, then the head */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* same circular-log traversal for the outbound side */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3867 
3868 #endif
3869 
/*
 * Abort an existing association from the local side: notify the user,
 * send an ABORT chunk to the peer, update statistics and free the
 * association.  When stcb is NULL only a pending inpcb free (if any) is
 * completed.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* no associations left; finish the inp free */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* established associations leave the "currestab" gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Acquire the socket lock in the required order: drop the TCB
	 * lock while holding a refcount so the assoc survives the gap.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3929 
3930 void
3931 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3932     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3933 {
3934 	struct sctp_chunkhdr *ch, chunk_buf;
3935 	unsigned int chk_length;
3936 	int contains_init_chunk;
3937 
3938 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3939 	/* Generate a TO address for future reference */
3940 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3941 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3942 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3943 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3944 		}
3945 	}
3946 	contains_init_chunk = 0;
3947 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3948 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3949 	while (ch != NULL) {
3950 		chk_length = ntohs(ch->chunk_length);
3951 		if (chk_length < sizeof(*ch)) {
3952 			/* break to abort land */
3953 			break;
3954 		}
3955 		switch (ch->chunk_type) {
3956 		case SCTP_INIT:
3957 			contains_init_chunk = 1;
3958 			break;
3959 		case SCTP_COOKIE_ECHO:
3960 			/* We hit here only if the assoc is being freed */
3961 			return;
3962 		case SCTP_PACKET_DROPPED:
3963 			/* we don't respond to pkt-dropped */
3964 			return;
3965 		case SCTP_ABORT_ASSOCIATION:
3966 			/* we don't respond with an ABORT to an ABORT */
3967 			return;
3968 		case SCTP_SHUTDOWN_COMPLETE:
3969 			/*
3970 			 * we ignore it since we are not waiting for it and
3971 			 * peer is gone
3972 			 */
3973 			return;
3974 		case SCTP_SHUTDOWN_ACK:
3975 			sctp_send_shutdown_complete2(m, sh, vrf_id, port);
3976 			return;
3977 		default:
3978 			break;
3979 		}
3980 		offset += SCTP_SIZE32(chk_length);
3981 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3982 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3983 	}
3984 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
3985 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
3986 	    (contains_init_chunk == 0))) {
3987 		sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
3988 	}
3989 }
3990 
3991 /*
3992  * check the inbound datagram to make sure there is not an abort inside it,
3993  * if there is return 1, else return 0.
3994  */
3995 int
3996 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3997 {
3998 	struct sctp_chunkhdr *ch;
3999 	struct sctp_init_chunk *init_chk, chunk_buf;
4000 	int offset;
4001 	unsigned int chk_length;
4002 
4003 	offset = iphlen + sizeof(struct sctphdr);
4004 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4005 	    (uint8_t *) & chunk_buf);
4006 	while (ch != NULL) {
4007 		chk_length = ntohs(ch->chunk_length);
4008 		if (chk_length < sizeof(*ch)) {
4009 			/* packet is probably corrupt */
4010 			break;
4011 		}
4012 		/* we seem to be ok, is it an abort? */
4013 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4014 			/* yep, tell them */
4015 			return (1);
4016 		}
4017 		if (ch->chunk_type == SCTP_INITIATION) {
4018 			/* need to update the Vtag */
4019 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4020 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4021 			if (init_chk != NULL) {
4022 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4023 			}
4024 		}
4025 		/* Nope, move to the next chunk */
4026 		offset += SCTP_SIZE32(chk_length);
4027 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4028 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4029 	}
4030 	return (0);
4031 }
4032 
4033 /*
4034  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4035  * set (i.e. it's 0) so, create this function to compare link local scopes
4036  */
4037 #ifdef INET6
4038 uint32_t
4039 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4040 {
4041 	struct sockaddr_in6 a, b;
4042 
4043 	/* save copies */
4044 	a = *addr1;
4045 	b = *addr2;
4046 
4047 	if (a.sin6_scope_id == 0)
4048 		if (sa6_recoverscope(&a)) {
4049 			/* can't get scope, so can't match */
4050 			return (0);
4051 		}
4052 	if (b.sin6_scope_id == 0)
4053 		if (sa6_recoverscope(&b)) {
4054 			/* can't get scope, so can't match */
4055 			return (0);
4056 		}
4057 	if (a.sin6_scope_id != b.sin6_scope_id)
4058 		return (0);
4059 
4060 	return (1);
4061 }
4062 
4063 /*
4064  * returns a sockaddr_in6 with embedded scope recovered and removed
4065  */
4066 struct sockaddr_in6 *
4067 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4068 {
4069 	/* check and strip embedded scope junk */
4070 	if (addr->sin6_family == AF_INET6) {
4071 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4072 			if (addr->sin6_scope_id == 0) {
4073 				*store = *addr;
4074 				if (!sa6_recoverscope(store)) {
4075 					/* use the recovered scope */
4076 					addr = store;
4077 				}
4078 			} else {
4079 				/* else, return the original "to" addr */
4080 				in6_clearscope(&addr->sin6_addr);
4081 			}
4082 		}
4083 	}
4084 	return (addr);
4085 }
4086 
4087 #endif
4088 
4089 /*
4090  * are the two addresses the same?  currently a "scopeless" check returns: 1
4091  * if same, 0 if not
4092  */
4093 int
4094 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4095 {
4096 
4097 	/* must be valid */
4098 	if (sa1 == NULL || sa2 == NULL)
4099 		return (0);
4100 
4101 	/* must be the same family */
4102 	if (sa1->sa_family != sa2->sa_family)
4103 		return (0);
4104 
4105 	switch (sa1->sa_family) {
4106 #ifdef INET6
4107 	case AF_INET6:
4108 		{
4109 			/* IPv6 addresses */
4110 			struct sockaddr_in6 *sin6_1, *sin6_2;
4111 
4112 			sin6_1 = (struct sockaddr_in6 *)sa1;
4113 			sin6_2 = (struct sockaddr_in6 *)sa2;
4114 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4115 			    sin6_2));
4116 		}
4117 #endif
4118 #ifdef INET
4119 	case AF_INET:
4120 		{
4121 			/* IPv4 addresses */
4122 			struct sockaddr_in *sin_1, *sin_2;
4123 
4124 			sin_1 = (struct sockaddr_in *)sa1;
4125 			sin_2 = (struct sockaddr_in *)sa2;
4126 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4127 		}
4128 #endif
4129 	default:
4130 		/* we don't do these... */
4131 		return (0);
4132 	}
4133 }
4134 
void
sctp_print_address(struct sockaddr *sa)
{
	/* Print a single socket address in human-readable form. */
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];

	ip6buf[0] = 0;
#endif

#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;

		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
		    ip6_sprintf(ip6buf, &sin6->sin6_addr),
		    ntohs(sin6->sin6_port),
		    sin6->sin6_scope_id);
		return;
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)sa;
		unsigned char *p = (unsigned char *)&sin->sin_addr;

		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
		    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
		return;
	}
#endif
	/* unknown address family */
	SCTP_PRINTF("?\n");
}
4176 
void
sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
{
	/*
	 * Print source and destination of a packet, derived from the IP
	 * header version field and the SCTP common header ports.
	 */
#ifdef INET
	if (iph->ip_v == IPVERSION) {
		struct sockaddr_in lsa, fsa;

		bzero(&lsa, sizeof(lsa));
		lsa.sin_len = sizeof(lsa);
		lsa.sin_family = AF_INET;
		lsa.sin_addr = iph->ip_src;
		lsa.sin_port = sh->src_port;
		bzero(&fsa, sizeof(fsa));
		fsa.sin_len = sizeof(fsa);
		fsa.sin_family = AF_INET;
		fsa.sin_addr = iph->ip_dst;
		fsa.sin_port = sh->dest_port;
		SCTP_PRINTF("src: ");
		sctp_print_address((struct sockaddr *)&lsa);
		SCTP_PRINTF("dest: ");
		sctp_print_address((struct sockaddr *)&fsa);
		return;
	}
#endif
#ifdef INET6
	if (iph->ip_v == (IPV6_VERSION >> 4)) {
		struct ip6_hdr *ip6 = (struct ip6_hdr *)iph;
		struct sockaddr_in6 lsa6, fsa6;

		bzero(&lsa6, sizeof(lsa6));
		lsa6.sin6_len = sizeof(lsa6);
		lsa6.sin6_family = AF_INET6;
		lsa6.sin6_addr = ip6->ip6_src;
		lsa6.sin6_port = sh->src_port;
		bzero(&fsa6, sizeof(fsa6));
		fsa6.sin6_len = sizeof(fsa6);
		fsa6.sin6_family = AF_INET6;
		fsa6.sin6_addr = ip6->ip6_dst;
		fsa6.sin6_port = sh->dest_port;
		SCTP_PRINTF("src: ");
		sctp_print_address((struct sockaddr *)&lsa6);
		SCTP_PRINTF("dest: ");
		sctp_print_address((struct sockaddr *)&fsa6);
		return;
	}
#endif
	/* TSNH: unknown IP version */
}
4232 
/*
 * Move every queued-to-read control structure belonging to stcb from
 * old_inp's read queue to new_inp's (used when an association is peeled
 * off or accepted onto its own socket).  The socket-buffer byte
 * accounting follows the data: bytes are debited from old_so->so_rcv and
 * credited to new_so->so_rcv.  waitflags is handed to sblock(); if the
 * old receive buffer cannot be locked the move is silently abandoned.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* keep concurrent readers off the old receive buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit every mbuf of this message from the old sb */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit every mbuf of this message to the new sb */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4308 
/*
 * Append a fully-built control structure to the endpoint's read queue,
 * charge its mbuf chain to the socket receive buffer sb, and wake any
 * reader.  Zero-length mbufs are pruned from the chain first.  If the
 * socket can no longer be read from, or the chain collapses to nothing,
 * the control structure is freed instead.
 *
 * end                - non-zero marks the message complete (end_added).
 * inp_read_lock_held - non-zero if the caller already holds the INP
 *                      read lock.
 * so_locked          - non-zero if the caller already holds the socket
 *                      lock (platforms that need it).
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* no reader will ever see this; release everything */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications are not counted as user-data receives */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* prune empty mbufs and charge the rest to the socket buffer */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* finally wake up any reader waiting on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Lock-order dance: reference the TCB, drop its
			 * lock, take the socket lock, re-take the TCB lock.
			 */
			if (!so_locked) {
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* socket went away while unlocked */
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4434 
4435 
/*
 * Append mbuf chain m to an existing control structure (partial
 * delivery or reassembly), charging the added bytes to sb when one is
 * given, and update the control's length and cum-ack bookkeeping.
 * Wakes the reader afterwards.  Returns 0 on success, -1 when there is
 * no control to append to, the message was already complete, or nothing
 * remains after pruning zero-length mbufs.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* prune empty mbufs; charge the remainder to sb if we have one */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake any reader waiting on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Lock-order dance: reference the TCB, drop its
			 * lock, take the socket lock, re-take the TCB lock.
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket went away while unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4585 
4586 
4587 
4588 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4589  *************ALTERNATE ROUTING CODE
4590  */
4591 
4592 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4593  *************ALTERNATE ROUTING CODE
4594  */
4595 
4596 struct mbuf *
4597 sctp_generate_invmanparam(int err)
4598 {
4599 	/* Return a MBUF with a invalid mandatory parameter */
4600 	struct mbuf *m;
4601 
4602 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4603 	if (m) {
4604 		struct sctp_paramhdr *ph;
4605 
4606 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4607 		ph = mtod(m, struct sctp_paramhdr *);
4608 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4609 		ph->param_type = htons(err);
4610 	}
4611 	return (m);
4612 }
4613 
4614 #ifdef SCTP_MBCNT_LOGGING
/*
 * SCTP_MBCNT_LOGGING variant of sctp_free_bufspace(): return the space
 * used by chunk tp1 to the association's output-queue accounting (and
 * to the socket send buffer for TCP-model sockets), logging the change.
 * chk_cnt is the number of chunks being released.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* never let the queue-size counter go below zero */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* mirror the accounting in the send buffer for 1-to-1 style sockets */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4646 
4647 #endif
4648 
4649 int
4650 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4651     int reason, int so_locked
4652 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4653     SCTP_UNUSED
4654 #endif
4655 )
4656 {
4657 	struct sctp_stream_out *strq;
4658 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4659 	struct sctp_stream_queue_pending *sp;
4660 	uint16_t stream = 0, seq = 0;
4661 	uint8_t foundeom = 0;
4662 	int ret_sz = 0;
4663 	int notdone;
4664 	int do_wakeup_routine = 0;
4665 
4666 	stream = tp1->rec.data.stream_number;
4667 	seq = tp1->rec.data.stream_seq;
4668 	do {
4669 		ret_sz += tp1->book_size;
4670 		if (tp1->data != NULL) {
4671 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4672 				sctp_flight_size_decrease(tp1);
4673 				sctp_total_flight_decrease(stcb, tp1);
4674 			}
4675 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4676 			stcb->asoc.peers_rwnd += tp1->send_size;
4677 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4678 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4679 			if (tp1->data) {
4680 				sctp_m_freem(tp1->data);
4681 				tp1->data = NULL;
4682 			}
4683 			do_wakeup_routine = 1;
4684 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4685 				stcb->asoc.sent_queue_cnt_removeable--;
4686 			}
4687 		}
4688 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4689 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4690 		    SCTP_DATA_NOT_FRAG) {
4691 			/* not frag'ed we ae done   */
4692 			notdone = 0;
4693 			foundeom = 1;
4694 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4695 			/* end of frag, we are done */
4696 			notdone = 0;
4697 			foundeom = 1;
4698 		} else {
4699 			/*
4700 			 * Its a begin or middle piece, we must mark all of
4701 			 * it
4702 			 */
4703 			notdone = 1;
4704 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4705 		}
4706 	} while (tp1 && notdone);
4707 	if (foundeom == 0) {
4708 		/*
4709 		 * The multi-part message was scattered across the send and
4710 		 * sent queue.
4711 		 */
4712 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4713 			if ((tp1->rec.data.stream_number != stream) ||
4714 			    (tp1->rec.data.stream_seq != seq)) {
4715 				break;
4716 			}
4717 			/*
4718 			 * save to chk in case we have some on stream out
4719 			 * queue. If so and we have an un-transmitted one we
4720 			 * don't have to fudge the TSN.
4721 			 */
4722 			chk = tp1;
4723 			ret_sz += tp1->book_size;
4724 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4725 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4726 			if (tp1->data) {
4727 				sctp_m_freem(tp1->data);
4728 				tp1->data = NULL;
4729 			}
4730 			/* No flight involved here book the size to 0 */
4731 			tp1->book_size = 0;
4732 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4733 				foundeom = 1;
4734 			}
4735 			do_wakeup_routine = 1;
4736 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4737 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4738 			/*
4739 			 * on to the sent queue so we can wait for it to be
4740 			 * passed by.
4741 			 */
4742 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4743 			    sctp_next);
4744 			stcb->asoc.send_queue_cnt--;
4745 			stcb->asoc.sent_queue_cnt++;
4746 		}
4747 	}
4748 	if (foundeom == 0) {
4749 		/*
4750 		 * Still no eom found. That means there is stuff left on the
4751 		 * stream out queue.. yuck.
4752 		 */
4753 		strq = &stcb->asoc.strmout[stream];
4754 		SCTP_TCB_SEND_LOCK(stcb);
4755 		TAILQ_FOREACH(sp, &strq->outqueue, next) {
4756 			/* FIXME: Shouldn't this be a serial number check? */
4757 			if (sp->strseq > seq) {
4758 				break;
4759 			}
4760 			/* Check if its our SEQ */
4761 			if (sp->strseq == seq) {
4762 				sp->discard_rest = 1;
4763 				/*
4764 				 * We may need to put a chunk on the queue
4765 				 * that holds the TSN that would have been
4766 				 * sent with the LAST bit.
4767 				 */
4768 				if (chk == NULL) {
4769 					/* Yep, we have to */
4770 					sctp_alloc_a_chunk(stcb, chk);
4771 					if (chk == NULL) {
4772 						/*
4773 						 * we are hosed. All we can
4774 						 * do is nothing.. which
4775 						 * will cause an abort if
4776 						 * the peer is paying
4777 						 * attention.
4778 						 */
4779 						goto oh_well;
4780 					}
4781 					memset(chk, 0, sizeof(*chk));
4782 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4783 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4784 					chk->asoc = &stcb->asoc;
4785 					chk->rec.data.stream_seq = sp->strseq;
4786 					chk->rec.data.stream_number = sp->stream;
4787 					chk->rec.data.payloadtype = sp->ppid;
4788 					chk->rec.data.context = sp->context;
4789 					chk->flags = sp->act_flags;
4790 					if (sp->net)
4791 						chk->whoTo = sp->net;
4792 					else
4793 						chk->whoTo = stcb->asoc.primary_destination;
4794 					atomic_add_int(&chk->whoTo->ref_count, 1);
4795 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4796 					stcb->asoc.pr_sctp_cnt++;
4797 					chk->pr_sctp_on = 1;
4798 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4799 					stcb->asoc.sent_queue_cnt++;
4800 					stcb->asoc.pr_sctp_cnt++;
4801 				} else {
4802 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4803 				}
4804 		oh_well:
4805 				if (sp->data) {
4806 					/*
4807 					 * Pull any data to free up the SB
4808 					 * and allow sender to "add more"
4809 					 * whilc we will throw away :-)
4810 					 */
4811 					sctp_free_spbufspace(stcb, &stcb->asoc,
4812 					    sp);
4813 					ret_sz += sp->length;
4814 					do_wakeup_routine = 1;
4815 					sp->some_taken = 1;
4816 					sctp_m_freem(sp->data);
4817 					sp->length = 0;
4818 					sp->data = NULL;
4819 					sp->tail_mbuf = NULL;
4820 				}
4821 				break;
4822 			}
4823 		}		/* End tailq_foreach */
4824 		SCTP_TCB_SEND_UNLOCK(stcb);
4825 	}
4826 	if (do_wakeup_routine) {
4827 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4828 		struct socket *so;
4829 
4830 		so = SCTP_INP_SO(stcb->sctp_ep);
4831 		if (!so_locked) {
4832 			atomic_add_int(&stcb->asoc.refcnt, 1);
4833 			SCTP_TCB_UNLOCK(stcb);
4834 			SCTP_SOCKET_LOCK(so, 1);
4835 			SCTP_TCB_LOCK(stcb);
4836 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4837 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4838 				/* assoc was freed while we were unlocked */
4839 				SCTP_SOCKET_UNLOCK(so, 1);
4840 				return (ret_sz);
4841 			}
4842 		}
4843 #endif
4844 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4845 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4846 		if (!so_locked) {
4847 			SCTP_SOCKET_UNLOCK(so, 1);
4848 		}
4849 #endif
4850 	}
4851 	return (ret_sz);
4852 }
4853 
4854 /*
4855  * checks to see if the given address, sa, is one that is currently known by
4856  * the kernel note: can't distinguish the same address on multiple interfaces
4857  * and doesn't handle multiple addresses with different zone/scope id's note:
4858  * ifa_ifwithaddr() compares the entire sockaddr struct
4859  */
4860 struct sctp_ifa *
4861 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4862     int holds_lock)
4863 {
4864 	struct sctp_laddr *laddr;
4865 
4866 	if (holds_lock == 0) {
4867 		SCTP_INP_RLOCK(inp);
4868 	}
4869 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4870 		if (laddr->ifa == NULL)
4871 			continue;
4872 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4873 			continue;
4874 #ifdef INET
4875 		if (addr->sa_family == AF_INET) {
4876 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4877 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4878 				/* found him. */
4879 				if (holds_lock == 0) {
4880 					SCTP_INP_RUNLOCK(inp);
4881 				}
4882 				return (laddr->ifa);
4883 				break;
4884 			}
4885 		}
4886 #endif
4887 #ifdef INET6
4888 		if (addr->sa_family == AF_INET6) {
4889 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4890 			    &laddr->ifa->address.sin6)) {
4891 				/* found him. */
4892 				if (holds_lock == 0) {
4893 					SCTP_INP_RUNLOCK(inp);
4894 				}
4895 				return (laddr->ifa);
4896 				break;
4897 			}
4898 		}
4899 #endif
4900 	}
4901 	if (holds_lock == 0) {
4902 		SCTP_INP_RUNLOCK(inp);
4903 	}
4904 	return (NULL);
4905 }
4906 
/*
 * Hash the address portion of a sockaddr; used to choose a bucket in
 * the VRF address hash table.  Unsupported families hash to 0.
 *
 * Fix: the IPv6 arm previously read "case INET6:", i.e. the kernel
 * option macro rather than the AF_INET6 address-family constant, so
 * IPv6 addresses always fell through to the default and hashed to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			/* fold the upper 16 bits into the lower 16 */
			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t hash_of_addr;

			sin6 = (struct sockaddr_in6 *)addr;
			/* sum the four 32-bit words, then fold */
			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3]);
			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
			return (hash_of_addr);
		}
#endif
	default:
		break;
	}
	return (0);
}
4940 
4941 struct sctp_ifa *
4942 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4943 {
4944 	struct sctp_ifa *sctp_ifap;
4945 	struct sctp_vrf *vrf;
4946 	struct sctp_ifalist *hash_head;
4947 	uint32_t hash_of_addr;
4948 
4949 	if (holds_lock == 0)
4950 		SCTP_IPI_ADDR_RLOCK();
4951 
4952 	vrf = sctp_find_vrf(vrf_id);
4953 	if (vrf == NULL) {
4954 stage_right:
4955 		if (holds_lock == 0)
4956 			SCTP_IPI_ADDR_RUNLOCK();
4957 		return (NULL);
4958 	}
4959 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4960 
4961 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4962 	if (hash_head == NULL) {
4963 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4964 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4965 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4966 		sctp_print_address(addr);
4967 		SCTP_PRINTF("No such bucket for address\n");
4968 		if (holds_lock == 0)
4969 			SCTP_IPI_ADDR_RUNLOCK();
4970 
4971 		return (NULL);
4972 	}
4973 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4974 		if (sctp_ifap == NULL) {
4975 #ifdef INVARIANTS
4976 			panic("Huh LIST_FOREACH corrupt");
4977 			goto stage_right;
4978 #else
4979 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4980 			goto stage_right;
4981 #endif
4982 		}
4983 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4984 			continue;
4985 #ifdef INET
4986 		if (addr->sa_family == AF_INET) {
4987 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4988 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4989 				/* found him. */
4990 				if (holds_lock == 0)
4991 					SCTP_IPI_ADDR_RUNLOCK();
4992 				return (sctp_ifap);
4993 				break;
4994 			}
4995 		}
4996 #endif
4997 #ifdef INET6
4998 		if (addr->sa_family == AF_INET6) {
4999 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5000 			    &sctp_ifap->address.sin6)) {
5001 				/* found him. */
5002 				if (holds_lock == 0)
5003 					SCTP_IPI_ADDR_RUNLOCK();
5004 				return (sctp_ifap);
5005 				break;
5006 			}
5007 		}
5008 #endif
5009 	}
5010 	if (holds_lock == 0)
5011 		SCTP_IPI_ADDR_RUNLOCK();
5012 	return (NULL);
5013 }
5014 
/*
 * Called after the user has read (and freed) data from the receive
 * socket buffer.  Decides whether the peer should be told that our
 * receive window has re-opened, and if so sends a window-update SACK.
 *
 * stcb         - association the data was read from; NULL is a no-op.
 * freed_so_far - in/out count of bytes freed since the last update; it
 *                is folded into stcb->freed_by_sorcv_sincelast and
 *                zeroed here.
 * hold_rlock   - non-zero if the caller holds the INP read-queue lock;
 *                it is dropped around the SACK send and re-acquired
 *                before returning.
 * rwnd_req     - how far the window must have opened before a
 *                window-update SACK is worth sending.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed while we work. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Also pin the endpoint; released at "out". */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed-byte count into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window opened by at least rwnd_req: send a window update. */
		if (hold_rlock) {
			/* Drop the read-queue lock across the send; re-taken at "out". */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: the assoc may have started dying. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		/* Tell the peer the window has re-opened. */
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-acquire the read-queue lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Drop the assoc reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5096 
5097 int
5098 sctp_sorecvmsg(struct socket *so,
5099     struct uio *uio,
5100     struct mbuf **mp,
5101     struct sockaddr *from,
5102     int fromlen,
5103     int *msg_flags,
5104     struct sctp_sndrcvinfo *sinfo,
5105     int filling_sinfo)
5106 {
5107 	/*
5108 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5109 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5110 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5111 	 * On the way out we may send out any combination of:
5112 	 * MSG_NOTIFICATION MSG_EOR
5113 	 *
5114 	 */
5115 	struct sctp_inpcb *inp = NULL;
5116 	int my_len = 0;
5117 	int cp_len = 0, error = 0;
5118 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5119 	struct mbuf *m = NULL;
5120 	struct sctp_tcb *stcb = NULL;
5121 	int wakeup_read_socket = 0;
5122 	int freecnt_applied = 0;
5123 	int out_flags = 0, in_flags = 0;
5124 	int block_allowed = 1;
5125 	uint32_t freed_so_far = 0;
5126 	uint32_t copied_so_far = 0;
5127 	int in_eeor_mode = 0;
5128 	int no_rcv_needed = 0;
5129 	uint32_t rwnd_req = 0;
5130 	int hold_sblock = 0;
5131 	int hold_rlock = 0;
5132 	int slen = 0;
5133 	uint32_t held_length = 0;
5134 	int sockbuf_lock = 0;
5135 
5136 	if (uio == NULL) {
5137 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5138 		return (EINVAL);
5139 	}
5140 	if (msg_flags) {
5141 		in_flags = *msg_flags;
5142 		if (in_flags & MSG_PEEK)
5143 			SCTP_STAT_INCR(sctps_read_peeks);
5144 	} else {
5145 		in_flags = 0;
5146 	}
5147 	slen = uio->uio_resid;
5148 
5149 	/* Pull in and set up our int flags */
5150 	if (in_flags & MSG_OOB) {
5151 		/* Out of band's NOT supported */
5152 		return (EOPNOTSUPP);
5153 	}
5154 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5155 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5156 		return (EINVAL);
5157 	}
5158 	if ((in_flags & (MSG_DONTWAIT
5159 	    | MSG_NBIO
5160 	    )) ||
5161 	    SCTP_SO_IS_NBIO(so)) {
5162 		block_allowed = 0;
5163 	}
5164 	/* setup the endpoint */
5165 	inp = (struct sctp_inpcb *)so->so_pcb;
5166 	if (inp == NULL) {
5167 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5168 		return (EFAULT);
5169 	}
5170 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5171 	/* Must be at least a MTU's worth */
5172 	if (rwnd_req < SCTP_MIN_RWND)
5173 		rwnd_req = SCTP_MIN_RWND;
5174 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5175 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5176 		sctp_misc_ints(SCTP_SORECV_ENTER,
5177 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5178 	}
5179 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5180 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5181 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5182 	}
5183 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5184 	sockbuf_lock = 1;
5185 	if (error) {
5186 		goto release_unlocked;
5187 	}
5188 restart:
5189 
5190 
5191 restart_nosblocks:
5192 	if (hold_sblock == 0) {
5193 		SOCKBUF_LOCK(&so->so_rcv);
5194 		hold_sblock = 1;
5195 	}
5196 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5197 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5198 		goto out;
5199 	}
5200 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5201 		if (so->so_error) {
5202 			error = so->so_error;
5203 			if ((in_flags & MSG_PEEK) == 0)
5204 				so->so_error = 0;
5205 			goto out;
5206 		} else {
5207 			if (so->so_rcv.sb_cc == 0) {
5208 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5209 				/* indicate EOF */
5210 				error = 0;
5211 				goto out;
5212 			}
5213 		}
5214 	}
5215 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5216 		/* we need to wait for data */
5217 		if ((so->so_rcv.sb_cc == 0) &&
5218 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5219 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5220 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5221 				/*
5222 				 * For active open side clear flags for
5223 				 * re-use passive open is blocked by
5224 				 * connect.
5225 				 */
5226 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5227 					/*
5228 					 * You were aborted, passive side
5229 					 * always hits here
5230 					 */
5231 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5232 					error = ECONNRESET;
5233 				}
5234 				so->so_state &= ~(SS_ISCONNECTING |
5235 				    SS_ISDISCONNECTING |
5236 				    SS_ISCONFIRMING |
5237 				    SS_ISCONNECTED);
5238 				if (error == 0) {
5239 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5240 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5241 						error = ENOTCONN;
5242 					}
5243 				}
5244 				goto out;
5245 			}
5246 		}
5247 		error = sbwait(&so->so_rcv);
5248 		if (error) {
5249 			goto out;
5250 		}
5251 		held_length = 0;
5252 		goto restart_nosblocks;
5253 	} else if (so->so_rcv.sb_cc == 0) {
5254 		if (so->so_error) {
5255 			error = so->so_error;
5256 			if ((in_flags & MSG_PEEK) == 0)
5257 				so->so_error = 0;
5258 		} else {
5259 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5260 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5261 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5262 					/*
5263 					 * For active open side clear flags
5264 					 * for re-use passive open is
5265 					 * blocked by connect.
5266 					 */
5267 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5268 						/*
5269 						 * You were aborted, passive
5270 						 * side always hits here
5271 						 */
5272 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5273 						error = ECONNRESET;
5274 					}
5275 					so->so_state &= ~(SS_ISCONNECTING |
5276 					    SS_ISDISCONNECTING |
5277 					    SS_ISCONFIRMING |
5278 					    SS_ISCONNECTED);
5279 					if (error == 0) {
5280 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5281 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5282 							error = ENOTCONN;
5283 						}
5284 					}
5285 					goto out;
5286 				}
5287 			}
5288 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5289 			error = EWOULDBLOCK;
5290 		}
5291 		goto out;
5292 	}
5293 	if (hold_sblock == 1) {
5294 		SOCKBUF_UNLOCK(&so->so_rcv);
5295 		hold_sblock = 0;
5296 	}
5297 	/* we possibly have data we can read */
5298 	/* sa_ignore FREED_MEMORY */
5299 	control = TAILQ_FIRST(&inp->read_queue);
5300 	if (control == NULL) {
5301 		/*
5302 		 * This could be happening since the appender did the
5303 		 * increment but as not yet did the tailq insert onto the
5304 		 * read_queue
5305 		 */
5306 		if (hold_rlock == 0) {
5307 			SCTP_INP_READ_LOCK(inp);
5308 		}
5309 		control = TAILQ_FIRST(&inp->read_queue);
5310 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5311 #ifdef INVARIANTS
5312 			panic("Huh, its non zero and nothing on control?");
5313 #endif
5314 			so->so_rcv.sb_cc = 0;
5315 		}
5316 		SCTP_INP_READ_UNLOCK(inp);
5317 		hold_rlock = 0;
5318 		goto restart;
5319 	}
5320 	if ((control->length == 0) &&
5321 	    (control->do_not_ref_stcb)) {
5322 		/*
5323 		 * Clean up code for freeing assoc that left behind a
5324 		 * pdapi.. maybe a peer in EEOR that just closed after
5325 		 * sending and never indicated a EOR.
5326 		 */
5327 		if (hold_rlock == 0) {
5328 			hold_rlock = 1;
5329 			SCTP_INP_READ_LOCK(inp);
5330 		}
5331 		control->held_length = 0;
5332 		if (control->data) {
5333 			/* Hmm there is data here .. fix */
5334 			struct mbuf *m_tmp;
5335 			int cnt = 0;
5336 
5337 			m_tmp = control->data;
5338 			while (m_tmp) {
5339 				cnt += SCTP_BUF_LEN(m_tmp);
5340 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5341 					control->tail_mbuf = m_tmp;
5342 					control->end_added = 1;
5343 				}
5344 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5345 			}
5346 			control->length = cnt;
5347 		} else {
5348 			/* remove it */
5349 			TAILQ_REMOVE(&inp->read_queue, control, next);
5350 			/* Add back any hiddend data */
5351 			sctp_free_remote_addr(control->whoFrom);
5352 			sctp_free_a_readq(stcb, control);
5353 		}
5354 		if (hold_rlock) {
5355 			hold_rlock = 0;
5356 			SCTP_INP_READ_UNLOCK(inp);
5357 		}
5358 		goto restart;
5359 	}
5360 	if ((control->length == 0) &&
5361 	    (control->end_added == 1)) {
5362 		/*
5363 		 * Do we also need to check for (control->pdapi_aborted ==
5364 		 * 1)?
5365 		 */
5366 		if (hold_rlock == 0) {
5367 			hold_rlock = 1;
5368 			SCTP_INP_READ_LOCK(inp);
5369 		}
5370 		TAILQ_REMOVE(&inp->read_queue, control, next);
5371 		if (control->data) {
5372 #ifdef INVARIANTS
5373 			panic("control->data not null but control->length == 0");
5374 #else
5375 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5376 			sctp_m_freem(control->data);
5377 			control->data = NULL;
5378 #endif
5379 		}
5380 		if (control->aux_data) {
5381 			sctp_m_free(control->aux_data);
5382 			control->aux_data = NULL;
5383 		}
5384 		sctp_free_remote_addr(control->whoFrom);
5385 		sctp_free_a_readq(stcb, control);
5386 		if (hold_rlock) {
5387 			hold_rlock = 0;
5388 			SCTP_INP_READ_UNLOCK(inp);
5389 		}
5390 		goto restart;
5391 	}
5392 	if (control->length == 0) {
5393 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5394 		    (filling_sinfo)) {
5395 			/* find a more suitable one then this */
5396 			ctl = TAILQ_NEXT(control, next);
5397 			while (ctl) {
5398 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5399 				    (ctl->some_taken ||
5400 				    (ctl->spec_flags & M_NOTIFICATION) ||
5401 				    ((ctl->do_not_ref_stcb == 0) &&
5402 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5403 				    ) {
5404 					/*-
5405 					 * If we have a different TCB next, and there is data
5406 					 * present. If we have already taken some (pdapi), OR we can
5407 					 * ref the tcb and no delivery as started on this stream, we
5408 					 * take it. Note we allow a notification on a different
5409 					 * assoc to be delivered..
5410 					 */
5411 					control = ctl;
5412 					goto found_one;
5413 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5414 					    (ctl->length) &&
5415 					    ((ctl->some_taken) ||
5416 					    ((ctl->do_not_ref_stcb == 0) &&
5417 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5418 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5419 					/*-
5420 					 * If we have the same tcb, and there is data present, and we
5421 					 * have the strm interleave feature present. Then if we have
5422 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5423 					 * not started a delivery for this stream, we can take it.
5424 					 * Note we do NOT allow a notificaiton on the same assoc to
5425 					 * be delivered.
5426 					 */
5427 					control = ctl;
5428 					goto found_one;
5429 				}
5430 				ctl = TAILQ_NEXT(ctl, next);
5431 			}
5432 		}
5433 		/*
5434 		 * if we reach here, not suitable replacement is available
5435 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5436 		 * into the our held count, and its time to sleep again.
5437 		 */
5438 		held_length = so->so_rcv.sb_cc;
5439 		control->held_length = so->so_rcv.sb_cc;
5440 		goto restart;
5441 	}
5442 	/* Clear the held length since there is something to read */
5443 	control->held_length = 0;
5444 	if (hold_rlock) {
5445 		SCTP_INP_READ_UNLOCK(inp);
5446 		hold_rlock = 0;
5447 	}
5448 found_one:
5449 	/*
5450 	 * If we reach here, control has a some data for us to read off.
5451 	 * Note that stcb COULD be NULL.
5452 	 */
5453 	control->some_taken++;
5454 	if (hold_sblock) {
5455 		SOCKBUF_UNLOCK(&so->so_rcv);
5456 		hold_sblock = 0;
5457 	}
5458 	stcb = control->stcb;
5459 	if (stcb) {
5460 		if ((control->do_not_ref_stcb == 0) &&
5461 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5462 			if (freecnt_applied == 0)
5463 				stcb = NULL;
5464 		} else if (control->do_not_ref_stcb == 0) {
5465 			/* you can't free it on me please */
5466 			/*
5467 			 * The lock on the socket buffer protects us so the
5468 			 * free code will stop. But since we used the
5469 			 * socketbuf lock and the sender uses the tcb_lock
5470 			 * to increment, we need to use the atomic add to
5471 			 * the refcnt
5472 			 */
5473 			if (freecnt_applied) {
5474 #ifdef INVARIANTS
5475 				panic("refcnt already incremented");
5476 #else
5477 				SCTP_PRINTF("refcnt already incremented?\n");
5478 #endif
5479 			} else {
5480 				atomic_add_int(&stcb->asoc.refcnt, 1);
5481 				freecnt_applied = 1;
5482 			}
5483 			/*
5484 			 * Setup to remember how much we have not yet told
5485 			 * the peer our rwnd has opened up. Note we grab the
5486 			 * value from the tcb from last time. Note too that
5487 			 * sack sending clears this when a sack is sent,
5488 			 * which is fine. Once we hit the rwnd_req, we then
5489 			 * will go to the sctp_user_rcvd() that will not
5490 			 * lock until it KNOWs it MUST send a WUP-SACK.
5491 			 */
5492 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5493 			stcb->freed_by_sorcv_sincelast = 0;
5494 		}
5495 	}
5496 	if (stcb &&
5497 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5498 	    control->do_not_ref_stcb == 0) {
5499 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5500 	}
5501 	/* First lets get off the sinfo and sockaddr info */
5502 	if ((sinfo) && filling_sinfo) {
5503 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5504 		nxt = TAILQ_NEXT(control, next);
5505 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5506 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5507 			struct sctp_extrcvinfo *s_extra;
5508 
5509 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5510 			if ((nxt) &&
5511 			    (nxt->length)) {
5512 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5513 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5514 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5515 				}
5516 				if (nxt->spec_flags & M_NOTIFICATION) {
5517 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5518 				}
5519 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5520 				s_extra->sreinfo_next_length = nxt->length;
5521 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5522 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5523 				if (nxt->tail_mbuf != NULL) {
5524 					if (nxt->end_added) {
5525 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5526 					}
5527 				}
5528 			} else {
5529 				/*
5530 				 * we explicitly 0 this, since the memcpy
5531 				 * got some other things beyond the older
5532 				 * sinfo_ that is on the control's structure
5533 				 * :-D
5534 				 */
5535 				nxt = NULL;
5536 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5537 				s_extra->sreinfo_next_aid = 0;
5538 				s_extra->sreinfo_next_length = 0;
5539 				s_extra->sreinfo_next_ppid = 0;
5540 				s_extra->sreinfo_next_stream = 0;
5541 			}
5542 		}
5543 		/*
5544 		 * update off the real current cum-ack, if we have an stcb.
5545 		 */
5546 		if ((control->do_not_ref_stcb == 0) && stcb)
5547 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5548 		/*
5549 		 * mask off the high bits, we keep the actual chunk bits in
5550 		 * there.
5551 		 */
5552 		sinfo->sinfo_flags &= 0x00ff;
5553 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5554 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5555 		}
5556 	}
5557 #ifdef SCTP_ASOCLOG_OF_TSNS
5558 	{
5559 		int index, newindex;
5560 		struct sctp_pcbtsn_rlog *entry;
5561 
5562 		do {
5563 			index = inp->readlog_index;
5564 			newindex = index + 1;
5565 			if (newindex >= SCTP_READ_LOG_SIZE) {
5566 				newindex = 0;
5567 			}
5568 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5569 		entry = &inp->readlog[index];
5570 		entry->vtag = control->sinfo_assoc_id;
5571 		entry->strm = control->sinfo_stream;
5572 		entry->seq = control->sinfo_ssn;
5573 		entry->sz = control->length;
5574 		entry->flgs = control->sinfo_flags;
5575 	}
5576 #endif
5577 	if (fromlen && from) {
5578 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
5579 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5580 #ifdef INET6
5581 		case AF_INET6:
5582 			((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5583 			break;
5584 #endif
5585 #ifdef INET
5586 		case AF_INET:
5587 			((struct sockaddr_in *)from)->sin_port = control->port_from;
5588 			break;
5589 #endif
5590 		default:
5591 			break;
5592 		}
5593 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5594 
5595 #if defined(INET) && defined(INET6)
5596 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5597 		    (from->sa_family == AF_INET) &&
5598 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5599 			struct sockaddr_in *sin;
5600 			struct sockaddr_in6 sin6;
5601 
5602 			sin = (struct sockaddr_in *)from;
5603 			bzero(&sin6, sizeof(sin6));
5604 			sin6.sin6_family = AF_INET6;
5605 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5606 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5607 			bcopy(&sin->sin_addr,
5608 			    &sin6.sin6_addr.s6_addr32[3],
5609 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5610 			sin6.sin6_port = sin->sin_port;
5611 			memcpy(from, &sin6, sizeof(struct sockaddr_in6));
5612 		}
5613 #endif
5614 #if defined(INET6)
5615 		{
5616 			struct sockaddr_in6 lsa6, *from6;
5617 
5618 			from6 = (struct sockaddr_in6 *)from;
5619 			sctp_recover_scope_mac(from6, (&lsa6));
5620 		}
5621 #endif
5622 	}
5623 	/* now copy out what data we can */
5624 	if (mp == NULL) {
5625 		/* copy out each mbuf in the chain up to length */
5626 get_more_data:
5627 		m = control->data;
5628 		while (m) {
5629 			/* Move out all we can */
5630 			cp_len = (int)uio->uio_resid;
5631 			my_len = (int)SCTP_BUF_LEN(m);
5632 			if (cp_len > my_len) {
5633 				/* not enough in this buf */
5634 				cp_len = my_len;
5635 			}
5636 			if (hold_rlock) {
5637 				SCTP_INP_READ_UNLOCK(inp);
5638 				hold_rlock = 0;
5639 			}
5640 			if (cp_len > 0)
5641 				error = uiomove(mtod(m, char *), cp_len, uio);
5642 			/* re-read */
5643 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5644 				goto release;
5645 			}
5646 			if ((control->do_not_ref_stcb == 0) && stcb &&
5647 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5648 				no_rcv_needed = 1;
5649 			}
5650 			if (error) {
5651 				/* error we are out of here */
5652 				goto release;
5653 			}
5654 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5655 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5656 			    ((control->end_added == 0) ||
5657 			    (control->end_added &&
5658 			    (TAILQ_NEXT(control, next) == NULL)))
5659 			    ) {
5660 				SCTP_INP_READ_LOCK(inp);
5661 				hold_rlock = 1;
5662 			}
5663 			if (cp_len == SCTP_BUF_LEN(m)) {
5664 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5665 				    (control->end_added)) {
5666 					out_flags |= MSG_EOR;
5667 					if ((control->do_not_ref_stcb == 0) &&
5668 					    (control->stcb != NULL) &&
5669 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5670 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5671 				}
5672 				if (control->spec_flags & M_NOTIFICATION) {
5673 					out_flags |= MSG_NOTIFICATION;
5674 				}
5675 				/* we ate up the mbuf */
5676 				if (in_flags & MSG_PEEK) {
5677 					/* just looking */
5678 					m = SCTP_BUF_NEXT(m);
5679 					copied_so_far += cp_len;
5680 				} else {
5681 					/* dispose of the mbuf */
5682 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5683 						sctp_sblog(&so->so_rcv,
5684 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5685 					}
5686 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5687 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5688 						sctp_sblog(&so->so_rcv,
5689 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5690 					}
5691 					copied_so_far += cp_len;
5692 					freed_so_far += cp_len;
5693 					freed_so_far += MSIZE;
5694 					atomic_subtract_int(&control->length, cp_len);
5695 					control->data = sctp_m_free(m);
5696 					m = control->data;
5697 					/*
5698 					 * been through it all, must hold sb
5699 					 * lock ok to null tail
5700 					 */
5701 					if (control->data == NULL) {
5702 #ifdef INVARIANTS
5703 						if ((control->end_added == 0) ||
5704 						    (TAILQ_NEXT(control, next) == NULL)) {
5705 							/*
5706 							 * If the end is not
5707 							 * added, OR the
5708 							 * next is NOT null
5709 							 * we MUST have the
5710 							 * lock.
5711 							 */
5712 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5713 								panic("Hmm we don't own the lock?");
5714 							}
5715 						}
5716 #endif
5717 						control->tail_mbuf = NULL;
5718 #ifdef INVARIANTS
5719 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5720 							panic("end_added, nothing left and no MSG_EOR");
5721 						}
5722 #endif
5723 					}
5724 				}
5725 			} else {
5726 				/* Do we need to trim the mbuf? */
5727 				if (control->spec_flags & M_NOTIFICATION) {
5728 					out_flags |= MSG_NOTIFICATION;
5729 				}
5730 				if ((in_flags & MSG_PEEK) == 0) {
5731 					SCTP_BUF_RESV_UF(m, cp_len);
5732 					SCTP_BUF_LEN(m) -= cp_len;
5733 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5734 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5735 					}
5736 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5737 					if ((control->do_not_ref_stcb == 0) &&
5738 					    stcb) {
5739 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5740 					}
5741 					copied_so_far += cp_len;
5742 					freed_so_far += cp_len;
5743 					freed_so_far += MSIZE;
5744 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5745 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5746 						    SCTP_LOG_SBRESULT, 0);
5747 					}
5748 					atomic_subtract_int(&control->length, cp_len);
5749 				} else {
5750 					copied_so_far += cp_len;
5751 				}
5752 			}
5753 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5754 				break;
5755 			}
5756 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5757 			    (control->do_not_ref_stcb == 0) &&
5758 			    (freed_so_far >= rwnd_req)) {
5759 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5760 			}
5761 		}		/* end while(m) */
5762 		/*
5763 		 * At this point we have looked at it all and we either have
5764 		 * a MSG_EOR/or read all the user wants... <OR>
5765 		 * control->length == 0.
5766 		 */
5767 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5768 			/* we are done with this control */
5769 			if (control->length == 0) {
5770 				if (control->data) {
5771 #ifdef INVARIANTS
5772 					panic("control->data not null at read eor?");
5773 #else
5774 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5775 					sctp_m_freem(control->data);
5776 					control->data = NULL;
5777 #endif
5778 				}
5779 		done_with_control:
5780 				if (TAILQ_NEXT(control, next) == NULL) {
5781 					/*
5782 					 * If we don't have a next we need a
5783 					 * lock, if there is a next
5784 					 * interrupt is filling ahead of us
5785 					 * and we don't need a lock to
5786 					 * remove this guy (which is the
5787 					 * head of the queue).
5788 					 */
5789 					if (hold_rlock == 0) {
5790 						SCTP_INP_READ_LOCK(inp);
5791 						hold_rlock = 1;
5792 					}
5793 				}
5794 				TAILQ_REMOVE(&inp->read_queue, control, next);
5795 				/* Add back any hiddend data */
5796 				if (control->held_length) {
5797 					held_length = 0;
5798 					control->held_length = 0;
5799 					wakeup_read_socket = 1;
5800 				}
5801 				if (control->aux_data) {
5802 					sctp_m_free(control->aux_data);
5803 					control->aux_data = NULL;
5804 				}
5805 				no_rcv_needed = control->do_not_ref_stcb;
5806 				sctp_free_remote_addr(control->whoFrom);
5807 				control->data = NULL;
5808 				sctp_free_a_readq(stcb, control);
5809 				control = NULL;
5810 				if ((freed_so_far >= rwnd_req) &&
5811 				    (no_rcv_needed == 0))
5812 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5813 
5814 			} else {
5815 				/*
5816 				 * The user did not read all of this
5817 				 * message, turn off the returned MSG_EOR
5818 				 * since we are leaving more behind on the
5819 				 * control to read.
5820 				 */
5821 #ifdef INVARIANTS
5822 				if (control->end_added &&
5823 				    (control->data == NULL) &&
5824 				    (control->tail_mbuf == NULL)) {
5825 					panic("Gak, control->length is corrupt?");
5826 				}
5827 #endif
5828 				no_rcv_needed = control->do_not_ref_stcb;
5829 				out_flags &= ~MSG_EOR;
5830 			}
5831 		}
5832 		if (out_flags & MSG_EOR) {
5833 			goto release;
5834 		}
5835 		if ((uio->uio_resid == 0) ||
5836 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5837 		    ) {
5838 			goto release;
5839 		}
5840 		/*
5841 		 * If I hit here the receiver wants more and this message is
5842 		 * NOT done (pd-api). So two questions. Can we block? if not
5843 		 * we are done. Did the user NOT set MSG_WAITALL?
5844 		 */
5845 		if (block_allowed == 0) {
5846 			goto release;
5847 		}
5848 		/*
5849 		 * We need to wait for more data a few things: - We don't
5850 		 * sbunlock() so we don't get someone else reading. - We
5851 		 * must be sure to account for the case where what is added
5852 		 * is NOT to our control when we wakeup.
5853 		 */
5854 
5855 		/*
5856 		 * Do we need to tell the transport a rwnd update might be
5857 		 * needed before we go to sleep?
5858 		 */
5859 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5860 		    ((freed_so_far >= rwnd_req) &&
5861 		    (control->do_not_ref_stcb == 0) &&
5862 		    (no_rcv_needed == 0))) {
5863 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5864 		}
5865 wait_some_more:
5866 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5867 			goto release;
5868 		}
5869 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5870 			goto release;
5871 
5872 		if (hold_rlock == 1) {
5873 			SCTP_INP_READ_UNLOCK(inp);
5874 			hold_rlock = 0;
5875 		}
5876 		if (hold_sblock == 0) {
5877 			SOCKBUF_LOCK(&so->so_rcv);
5878 			hold_sblock = 1;
5879 		}
5880 		if ((copied_so_far) && (control->length == 0) &&
5881 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5882 			goto release;
5883 		}
5884 		if (so->so_rcv.sb_cc <= control->held_length) {
5885 			error = sbwait(&so->so_rcv);
5886 			if (error) {
5887 				goto release;
5888 			}
5889 			control->held_length = 0;
5890 		}
5891 		if (hold_sblock) {
5892 			SOCKBUF_UNLOCK(&so->so_rcv);
5893 			hold_sblock = 0;
5894 		}
5895 		if (control->length == 0) {
5896 			/* still nothing here */
5897 			if (control->end_added == 1) {
5898 				/* he aborted, or is done i.e.did a shutdown */
5899 				out_flags |= MSG_EOR;
5900 				if (control->pdapi_aborted) {
5901 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5902 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5903 
5904 					out_flags |= MSG_TRUNC;
5905 				} else {
5906 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5907 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5908 				}
5909 				goto done_with_control;
5910 			}
5911 			if (so->so_rcv.sb_cc > held_length) {
5912 				control->held_length = so->so_rcv.sb_cc;
5913 				held_length = 0;
5914 			}
5915 			goto wait_some_more;
5916 		} else if (control->data == NULL) {
5917 			/*
5918 			 * we must re-sync since data is probably being
5919 			 * added
5920 			 */
5921 			SCTP_INP_READ_LOCK(inp);
5922 			if ((control->length > 0) && (control->data == NULL)) {
5923 				/*
5924 				 * big trouble.. we have the lock and its
5925 				 * corrupt?
5926 				 */
5927 #ifdef INVARIANTS
5928 				panic("Impossible data==NULL length !=0");
5929 #endif
5930 				out_flags |= MSG_EOR;
5931 				out_flags |= MSG_TRUNC;
5932 				control->length = 0;
5933 				SCTP_INP_READ_UNLOCK(inp);
5934 				goto done_with_control;
5935 			}
5936 			SCTP_INP_READ_UNLOCK(inp);
5937 			/* We will fall around to get more data */
5938 		}
5939 		goto get_more_data;
5940 	} else {
5941 		/*-
5942 		 * Give caller back the mbuf chain,
5943 		 * store in uio_resid the length
5944 		 */
5945 		wakeup_read_socket = 0;
5946 		if ((control->end_added == 0) ||
5947 		    (TAILQ_NEXT(control, next) == NULL)) {
5948 			/* Need to get rlock */
5949 			if (hold_rlock == 0) {
5950 				SCTP_INP_READ_LOCK(inp);
5951 				hold_rlock = 1;
5952 			}
5953 		}
5954 		if (control->end_added) {
5955 			out_flags |= MSG_EOR;
5956 			if ((control->do_not_ref_stcb == 0) &&
5957 			    (control->stcb != NULL) &&
5958 			    ((control->spec_flags & M_NOTIFICATION) == 0))
5959 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5960 		}
5961 		if (control->spec_flags & M_NOTIFICATION) {
5962 			out_flags |= MSG_NOTIFICATION;
5963 		}
5964 		uio->uio_resid = control->length;
5965 		*mp = control->data;
5966 		m = control->data;
5967 		while (m) {
5968 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5969 				sctp_sblog(&so->so_rcv,
5970 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5971 			}
5972 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5973 			freed_so_far += SCTP_BUF_LEN(m);
5974 			freed_so_far += MSIZE;
5975 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5976 				sctp_sblog(&so->so_rcv,
5977 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5978 			}
5979 			m = SCTP_BUF_NEXT(m);
5980 		}
5981 		control->data = control->tail_mbuf = NULL;
5982 		control->length = 0;
5983 		if (out_flags & MSG_EOR) {
5984 			/* Done with this control */
5985 			goto done_with_control;
5986 		}
5987 	}
5988 release:
5989 	if (hold_rlock == 1) {
5990 		SCTP_INP_READ_UNLOCK(inp);
5991 		hold_rlock = 0;
5992 	}
5993 	if (hold_sblock == 1) {
5994 		SOCKBUF_UNLOCK(&so->so_rcv);
5995 		hold_sblock = 0;
5996 	}
5997 	sbunlock(&so->so_rcv);
5998 	sockbuf_lock = 0;
5999 
6000 release_unlocked:
6001 	if (hold_sblock) {
6002 		SOCKBUF_UNLOCK(&so->so_rcv);
6003 		hold_sblock = 0;
6004 	}
6005 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6006 		if ((freed_so_far >= rwnd_req) &&
6007 		    (control && (control->do_not_ref_stcb == 0)) &&
6008 		    (no_rcv_needed == 0))
6009 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6010 	}
6011 out:
6012 	if (msg_flags) {
6013 		*msg_flags = out_flags;
6014 	}
6015 	if (((out_flags & MSG_EOR) == 0) &&
6016 	    ((in_flags & MSG_PEEK) == 0) &&
6017 	    (sinfo) &&
6018 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6019 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6020 		struct sctp_extrcvinfo *s_extra;
6021 
6022 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6023 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6024 	}
6025 	if (hold_rlock == 1) {
6026 		SCTP_INP_READ_UNLOCK(inp);
6027 	}
6028 	if (hold_sblock) {
6029 		SOCKBUF_UNLOCK(&so->so_rcv);
6030 	}
6031 	if (sockbuf_lock) {
6032 		sbunlock(&so->so_rcv);
6033 	}
6034 	if (freecnt_applied) {
6035 		/*
6036 		 * The lock on the socket buffer protects us so the free
6037 		 * code will stop. But since we used the socketbuf lock and
6038 		 * the sender uses the tcb_lock to increment, we need to use
6039 		 * the atomic add to the refcnt.
6040 		 */
6041 		if (stcb == NULL) {
6042 #ifdef INVARIANTS
6043 			panic("stcb for refcnt has gone NULL?");
6044 			goto stage_left;
6045 #else
6046 			goto stage_left;
6047 #endif
6048 		}
6049 		atomic_add_int(&stcb->asoc.refcnt, -1);
6050 		/* Save the value back for next time */
6051 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6052 	}
6053 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6054 		if (stcb) {
6055 			sctp_misc_ints(SCTP_SORECV_DONE,
6056 			    freed_so_far,
6057 			    ((uio) ? (slen - uio->uio_resid) : slen),
6058 			    stcb->asoc.my_rwnd,
6059 			    so->so_rcv.sb_cc);
6060 		} else {
6061 			sctp_misc_ints(SCTP_SORECV_DONE,
6062 			    freed_so_far,
6063 			    ((uio) ? (slen - uio->uio_resid) : slen),
6064 			    0,
6065 			    so->so_rcv.sb_cc);
6066 		}
6067 	}
6068 stage_left:
6069 	if (wakeup_read_socket) {
6070 		sctp_sorwakeup(inp, so);
6071 	}
6072 	return (error);
6073 }
6074 
6075 
6076 #ifdef SCTP_MBUF_LOGGING
6077 struct mbuf *
6078 sctp_m_free(struct mbuf *m)
6079 {
6080 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6081 		if (SCTP_BUF_IS_EXTENDED(m)) {
6082 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6083 		}
6084 	}
6085 	return (m_free(m));
6086 }
6087 
6088 void
6089 sctp_m_freem(struct mbuf *mb)
6090 {
6091 	while (mb != NULL)
6092 		mb = sctp_m_free(mb);
6093 }
6094 
6095 #endif
6096 
6097 int
6098 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6099 {
6100 	/*
6101 	 * Given a local address. For all associations that holds the
6102 	 * address, request a peer-set-primary.
6103 	 */
6104 	struct sctp_ifa *ifa;
6105 	struct sctp_laddr *wi;
6106 
6107 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6108 	if (ifa == NULL) {
6109 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6110 		return (EADDRNOTAVAIL);
6111 	}
6112 	/*
6113 	 * Now that we have the ifa we must awaken the iterator with this
6114 	 * message.
6115 	 */
6116 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6117 	if (wi == NULL) {
6118 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6119 		return (ENOMEM);
6120 	}
6121 	/* Now incr the count and int wi structure */
6122 	SCTP_INCR_LADDR_COUNT();
6123 	bzero(wi, sizeof(*wi));
6124 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6125 	wi->ifa = ifa;
6126 	wi->action = SCTP_SET_PRIM_ADDR;
6127 	atomic_add_int(&ifa->refcount, 1);
6128 
6129 	/* Now add it to the work queue */
6130 	SCTP_WQ_ADDR_LOCK();
6131 	/*
6132 	 * Should this really be a tailq? As it is we will process the
6133 	 * newest first :-0
6134 	 */
6135 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6136 	SCTP_WQ_ADDR_UNLOCK();
6137 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6138 	    (struct sctp_inpcb *)NULL,
6139 	    (struct sctp_tcb *)NULL,
6140 	    (struct sctp_nets *)NULL);
6141 	return (0);
6142 }
6143 
6144 
6145 int
6146 sctp_soreceive(struct socket *so,
6147     struct sockaddr **psa,
6148     struct uio *uio,
6149     struct mbuf **mp0,
6150     struct mbuf **controlp,
6151     int *flagsp)
6152 {
6153 	int error, fromlen;
6154 	uint8_t sockbuf[256];
6155 	struct sockaddr *from;
6156 	struct sctp_extrcvinfo sinfo;
6157 	int filling_sinfo = 1;
6158 	struct sctp_inpcb *inp;
6159 
6160 	inp = (struct sctp_inpcb *)so->so_pcb;
6161 	/* pickup the assoc we are reading from */
6162 	if (inp == NULL) {
6163 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6164 		return (EINVAL);
6165 	}
6166 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6167 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6168 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6169 	    (controlp == NULL)) {
6170 		/* user does not want the sndrcv ctl */
6171 		filling_sinfo = 0;
6172 	}
6173 	if (psa) {
6174 		from = (struct sockaddr *)sockbuf;
6175 		fromlen = sizeof(sockbuf);
6176 		from->sa_len = 0;
6177 	} else {
6178 		from = NULL;
6179 		fromlen = 0;
6180 	}
6181 
6182 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6183 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6184 	if ((controlp) && (filling_sinfo)) {
6185 		/* copy back the sinfo in a CMSG format */
6186 		if (filling_sinfo)
6187 			*controlp = sctp_build_ctl_nchunk(inp,
6188 			    (struct sctp_sndrcvinfo *)&sinfo);
6189 		else
6190 			*controlp = NULL;
6191 	}
6192 	if (psa) {
6193 		/* copy back the address info */
6194 		if (from && from->sa_len) {
6195 			*psa = sodupsockaddr(from, M_NOWAIT);
6196 		} else {
6197 			*psa = NULL;
6198 		}
6199 	}
6200 	return (error);
6201 }
6202 
6203 
6204 
6205 
6206 
/*
 * sctp_connectx() helper: walk a packed array of 'totaddr' sockaddrs
 * and add each as a remote address of the association.  Returns the
 * number of addresses added.  On a bad address or allocation failure
 * the association is freed, *error is set, and the caller must NOT
 * touch stcb again.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast peers */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast peers */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown family leaves 'incr'
			 * at its previous value (0 for the first entry),
			 * so 'sa' may fail to advance.  Presumably the
			 * caller pre-validates families -- confirm.
			 */
			break;
		}
		/* step to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6282 
6283 struct sctp_tcb *
6284 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6285     int *totaddr, int *num_v4, int *num_v6, int *error,
6286     int limit, int *bad_addr)
6287 {
6288 	struct sockaddr *sa;
6289 	struct sctp_tcb *stcb = NULL;
6290 	size_t incr, at, i;
6291 
6292 	at = incr = 0;
6293 	sa = addr;
6294 
6295 	*error = *num_v6 = *num_v4 = 0;
6296 	/* account and validate addresses */
6297 	for (i = 0; i < (size_t)*totaddr; i++) {
6298 		switch (sa->sa_family) {
6299 #ifdef INET
6300 		case AF_INET:
6301 			(*num_v4) += 1;
6302 			incr = sizeof(struct sockaddr_in);
6303 			if (sa->sa_len != incr) {
6304 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6305 				*error = EINVAL;
6306 				*bad_addr = 1;
6307 				return (NULL);
6308 			}
6309 			break;
6310 #endif
6311 #ifdef INET6
6312 		case AF_INET6:
6313 			{
6314 				struct sockaddr_in6 *sin6;
6315 
6316 				sin6 = (struct sockaddr_in6 *)sa;
6317 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6318 					/* Must be non-mapped for connectx */
6319 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6320 					*error = EINVAL;
6321 					*bad_addr = 1;
6322 					return (NULL);
6323 				}
6324 				(*num_v6) += 1;
6325 				incr = sizeof(struct sockaddr_in6);
6326 				if (sa->sa_len != incr) {
6327 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6328 					*error = EINVAL;
6329 					*bad_addr = 1;
6330 					return (NULL);
6331 				}
6332 				break;
6333 			}
6334 #endif
6335 		default:
6336 			*totaddr = i;
6337 			/* we are done */
6338 			break;
6339 		}
6340 		if (i == (size_t)*totaddr) {
6341 			break;
6342 		}
6343 		SCTP_INP_INCR_REF(inp);
6344 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6345 		if (stcb != NULL) {
6346 			/* Already have or am bring up an association */
6347 			return (stcb);
6348 		} else {
6349 			SCTP_INP_DECR_REF(inp);
6350 		}
6351 		if ((at + incr) > (size_t)limit) {
6352 			*totaddr = i;
6353 			break;
6354 		}
6355 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6356 	}
6357 	return ((struct sctp_tcb *)NULL);
6358 }
6359 
6360 /*
6361  * sctp_bindx(ADD) for one address.
6362  * assumes all arguments are valid/checked by caller.
6363  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	/* scratch space for converting a v4-mapped v6 address to v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* treat the mapped address as plain IPv4 from here on */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* not bound at all yet: do a normal bind to this address */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;

		/*
		 * NOTE(review): addr_touse may still be an IPv6 sockaddr
		 * here; the sockaddr_in cast below only touches the port
		 * field, which presumably shares its offset with
		 * sin6_port -- confirm.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* address is free: add it to this endpoint */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* some other endpoint owns this address/port */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6487 
6488 /*
6489  * sctp_bindx(DELETE) for one address.
6490  * assumes all arguments are valid/checked by caller.
6491  */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	/* scratch space for converting a v4-mapped v6 address to v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6)
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* treat the mapped address as plain IPv4 from here on */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6572 
6573 /*
6574  * returns the valid local address count for an assoc, taking into account
6575  * all scoping rules
6576  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/* Which families may this endpoint use at all? */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			/* dual-stack v6 socket may also use v4 */
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad
									 * link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6710 
6711 #if defined(SCTP_LOCAL_TRACE_BUF)
6712 
6713 void
6714 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6715 {
6716 	uint32_t saveindex, newindex;
6717 
6718 	do {
6719 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6720 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6721 			newindex = 1;
6722 		} else {
6723 			newindex = saveindex + 1;
6724 		}
6725 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6726 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6727 		saveindex = 0;
6728 	}
6729 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6730 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6731 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6732 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6733 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6734 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6735 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6736 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6737 }
6738 
6739 #endif
6740 /* XXX: Remove the #ifdef after tunneling over IPv6 works also on FreeBSD. */
6741 #ifdef INET
/*
 * We will need to add support to bind the ports and such here so we
 * can do UDP tunneling.  In the meantime, we return an error.
 */
6747 #include <netinet/udp.h>
6748 #include <netinet/udp_var.h>
6749 #include <sys/proc.h>
6750 #ifdef INET6
6751 #include <netinet6/sctp6_var.h>
6752 #endif
6753 
6754 static void
6755 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6756 {
6757 	struct ip *iph;
6758 	struct mbuf *sp, *last;
6759 	struct udphdr *uhdr;
6760 	uint16_t port = 0;
6761 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6762 
6763 	/*
6764 	 * Split out the mbuf chain. Leave the IP header in m, place the
6765 	 * rest in the sp.
6766 	 */
6767 	if ((m->m_flags & M_PKTHDR) == 0) {
6768 		/* Can't handle one that is not a pkt hdr */
6769 		goto out;
6770 	}
6771 	/* pull the src port */
6772 	iph = mtod(m, struct ip *);
6773 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6774 
6775 	port = uhdr->uh_sport;
6776 	sp = m_split(m, off, M_DONTWAIT);
6777 	if (sp == NULL) {
6778 		/* Gak, drop packet, we can't do a split */
6779 		goto out;
6780 	}
6781 	if (sp->m_pkthdr.len < header_size) {
6782 		/* Gak, packet can't have an SCTP header in it - to small */
6783 		m_freem(sp);
6784 		goto out;
6785 	}
6786 	/* ok now pull up the UDP header and SCTP header together */
6787 	sp = m_pullup(sp, header_size);
6788 	if (sp == NULL) {
6789 		/* Gak pullup failed */
6790 		goto out;
6791 	}
6792 	/* trim out the UDP header */
6793 	m_adj(sp, sizeof(struct udphdr));
6794 
6795 	/* Now reconstruct the mbuf chain */
6796 	/* 1) find last one */
6797 	last = m;
6798 	while (last->m_next != NULL) {
6799 		last = last->m_next;
6800 	}
6801 	last->m_next = sp;
6802 	m->m_pkthdr.len += sp->m_pkthdr.len;
6803 	last = m;
6804 	while (last != NULL) {
6805 		last = last->m_next;
6806 	}
6807 	/* Now its ready for sctp_input or sctp6_input */
6808 	iph = mtod(m, struct ip *);
6809 	switch (iph->ip_v) {
6810 #ifdef INET
6811 	case IPVERSION:
6812 		{
6813 			uint16_t len;
6814 
6815 			/* its IPv4 */
6816 			len = SCTP_GET_IPV4_LENGTH(iph);
6817 			len -= sizeof(struct udphdr);
6818 			SCTP_GET_IPV4_LENGTH(iph) = len;
6819 			sctp_input_with_port(m, off, port);
6820 			break;
6821 		}
6822 #endif
6823 #ifdef INET6
6824 	case IPV6_VERSION >> 4:
6825 		{
6826 			/* its IPv6 - NOT supported */
6827 			goto out;
6828 			break;
6829 
6830 		}
6831 #endif
6832 	default:
6833 		{
6834 			m_freem(m);
6835 			break;
6836 		}
6837 	}
6838 	return;
6839 out:
6840 	m_freem(m);
6841 }
6842 
6843 void
6844 sctp_over_udp_stop(void)
6845 {
6846 	struct socket *sop;
6847 
6848 	/*
6849 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6850 	 * for writting!
6851 	 */
6852 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6853 		/* Nothing to do */
6854 		return;
6855 	}
6856 	sop = SCTP_BASE_INFO(udp_tun_socket);
6857 	soclose(sop);
6858 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6859 }
6860 
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	/* create an in-kernel UDP socket with the current thread's creds */
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up, we can't get the port */
exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_udp_tunneled_packet().
	 */
	return (0);
}
6914 
6915 #endif
6916