xref: /freebsd/sys/netinet/sctputil.c (revision 3ef51c5fb9163f2aafb1c14729e06a8bf0c4d113)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #endif
45 #include <netinet/sctp_header.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_uio.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_bsd_addr.h>
53 
54 
55 #ifndef KTR_SCTP
56 #define KTR_SCTP KTR_SUBSYS
57 #endif
58 
59 extern struct sctp_cc_functions sctp_cc_functions[];
60 extern struct sctp_ss_functions sctp_ss_functions[];
61 
62 void
63 sctp_sblog(struct sockbuf *sb,
64     struct sctp_tcb *stcb, int from, int incr)
65 {
66 	struct sctp_cwnd_log sctp_clog;
67 
68 	sctp_clog.x.sb.stcb = stcb;
69 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
70 	if (stcb)
71 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
72 	else
73 		sctp_clog.x.sb.stcb_sbcc = 0;
74 	sctp_clog.x.sb.incr = incr;
75 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
76 	    SCTP_LOG_EVENT_SB,
77 	    from,
78 	    sctp_clog.x.misc.log1,
79 	    sctp_clog.x.misc.log2,
80 	    sctp_clog.x.misc.log3,
81 	    sctp_clog.x.misc.log4);
82 }
83 
84 void
85 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
86 {
87 	struct sctp_cwnd_log sctp_clog;
88 
89 	sctp_clog.x.close.inp = (void *)inp;
90 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
91 	if (stcb) {
92 		sctp_clog.x.close.stcb = (void *)stcb;
93 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
94 	} else {
95 		sctp_clog.x.close.stcb = 0;
96 		sctp_clog.x.close.state = 0;
97 	}
98 	sctp_clog.x.close.loc = loc;
99 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
100 	    SCTP_LOG_EVENT_CLOSE,
101 	    0,
102 	    sctp_clog.x.misc.log1,
103 	    sctp_clog.x.misc.log2,
104 	    sctp_clog.x.misc.log3,
105 	    sctp_clog.x.misc.log4);
106 }
107 
108 
void
rto_logging(struct sctp_nets *net, int from)
{
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace an RTT observation for destination `net` via KTR.
	 * The memset guarantees the x.misc.logN overlay printed below
	 * holds defined (zero) bytes for anything not written here.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	/* net->rtt scaled down by 1000; presumably usec -> msec — confirm */
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace mapping-array state (base/cumulative/highest TSN) via KTR.
	 * memset keeps the x.misc.logN overlay printed below fully defined.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
201 
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
    int from)
{
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace fast-retransmit bookkeeping (largest / largest-new / current
	 * TSN).  memset keeps the x.misc.logN overlay fully defined.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
220 
221 void
222 sctp_log_mb(struct mbuf *m, int from)
223 {
224 	struct sctp_cwnd_log sctp_clog;
225 
226 	sctp_clog.x.mb.mp = m;
227 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230 	if (SCTP_BUF_IS_EXTENDED(m)) {
231 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233 	} else {
234 		sctp_clog.x.mb.ext = 0;
235 		sctp_clog.x.mb.refcnt = 0;
236 	}
237 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238 	    SCTP_LOG_EVENT_MBUF,
239 	    from,
240 	    sctp_clog.x.misc.log1,
241 	    sctp_clog.x.misc.log2,
242 	    sctp_clog.x.misc.log3,
243 	    sctp_clog.x.misc.log4);
244 }
245 
246 void
247 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
248     int from)
249 {
250 	struct sctp_cwnd_log sctp_clog;
251 
252 	if (control == NULL) {
253 		SCTP_PRINTF("Gak log of NULL?\n");
254 		return;
255 	}
256 	sctp_clog.x.strlog.stcb = control->stcb;
257 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
258 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
259 	sctp_clog.x.strlog.strm = control->sinfo_stream;
260 	if (poschk != NULL) {
261 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
262 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
263 	} else {
264 		sctp_clog.x.strlog.e_tsn = 0;
265 		sctp_clog.x.strlog.e_sseq = 0;
266 	}
267 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
268 	    SCTP_LOG_EVENT_STRM,
269 	    from,
270 	    sctp_clog.x.misc.log1,
271 	    sctp_clog.x.misc.log2,
272 	    sctp_clog.x.misc.log3,
273 	    sctp_clog.x.misc.log4);
274 }
275 
276 void
277 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
278 {
279 	struct sctp_cwnd_log sctp_clog;
280 
281 	sctp_clog.x.cwnd.net = net;
282 	if (stcb->asoc.send_queue_cnt > 255)
283 		sctp_clog.x.cwnd.cnt_in_send = 255;
284 	else
285 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
286 	if (stcb->asoc.stream_queue_cnt > 255)
287 		sctp_clog.x.cwnd.cnt_in_str = 255;
288 	else
289 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
290 
291 	if (net) {
292 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
293 		sctp_clog.x.cwnd.inflight = net->flight_size;
294 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
295 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
296 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
297 	}
298 	if (SCTP_CWNDLOG_PRESEND == from) {
299 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
300 	}
301 	sctp_clog.x.cwnd.cwnd_augment = augment;
302 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
303 	    SCTP_LOG_EVENT_CWND,
304 	    from,
305 	    sctp_clog.x.misc.log1,
306 	    sctp_clog.x.misc.log2,
307 	    sctp_clog.x.misc.log3,
308 	    sctp_clog.x.misc.log4);
309 }
310 
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Snapshot the ownership state of every lock relevant to this
	 * endpoint/association (tcb, inp, create, global info, socket
	 * buffers) and trace it via KTR.  memset keeps the x.misc.logN
	 * overlay printed below fully defined.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx — confirm whether sock_lock was meant to
		 * sample a distinct socket lock.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
354 
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace a max-burst event, reusing the cwnd record layout
	 * (cwnd_new_value carries `error`, cwnd_augment carries `burst`).
	 * NOTE(review): `net` is dereferenced unconditionally below —
	 * callers must pass a non-NULL net.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
381 
382 void
383 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
384 {
385 	struct sctp_cwnd_log sctp_clog;
386 
387 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
388 	sctp_clog.x.rwnd.send_size = snd_size;
389 	sctp_clog.x.rwnd.overhead = overhead;
390 	sctp_clog.x.rwnd.new_rwnd = 0;
391 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
392 	    SCTP_LOG_EVENT_RWND,
393 	    from,
394 	    sctp_clog.x.misc.log1,
395 	    sctp_clog.x.misc.log2,
396 	    sctp_clog.x.misc.log3,
397 	    sctp_clog.x.misc.log4);
398 }
399 
400 void
401 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
402 {
403 	struct sctp_cwnd_log sctp_clog;
404 
405 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
406 	sctp_clog.x.rwnd.send_size = flight_size;
407 	sctp_clog.x.rwnd.overhead = overhead;
408 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
409 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
410 	    SCTP_LOG_EVENT_RWND,
411 	    from,
412 	    sctp_clog.x.misc.log1,
413 	    sctp_clog.x.misc.log2,
414 	    sctp_clog.x.misc.log3,
415 	    sctp_clog.x.misc.log4);
416 }
417 
418 void
419 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
420 {
421 	struct sctp_cwnd_log sctp_clog;
422 
423 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
424 	sctp_clog.x.mbcnt.size_change = book;
425 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
426 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
427 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
428 	    SCTP_LOG_EVENT_MBCNT,
429 	    from,
430 	    sctp_clog.x.misc.log1,
431 	    sctp_clog.x.misc.log2,
432 	    sctp_clog.x.misc.log3,
433 	    sctp_clog.x.misc.log4);
434 }
435 
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	/* Trace four caller-chosen values directly; no log record needed. */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
444 
445 void
446 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
447 {
448 	struct sctp_cwnd_log sctp_clog;
449 
450 	sctp_clog.x.wake.stcb = (void *)stcb;
451 	sctp_clog.x.wake.wake_cnt = wake_cnt;
452 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
453 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
454 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
455 
456 	if (stcb->asoc.stream_queue_cnt < 0xff)
457 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
458 	else
459 		sctp_clog.x.wake.stream_qcnt = 0xff;
460 
461 	if (stcb->asoc.chunks_on_out_queue < 0xff)
462 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
463 	else
464 		sctp_clog.x.wake.chunks_on_oque = 0xff;
465 
466 	sctp_clog.x.wake.sctpflags = 0;
467 	/* set in the defered mode stuff */
468 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
469 		sctp_clog.x.wake.sctpflags |= 1;
470 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
471 		sctp_clog.x.wake.sctpflags |= 2;
472 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
473 		sctp_clog.x.wake.sctpflags |= 4;
474 	/* what about the sb */
475 	if (stcb->sctp_socket) {
476 		struct socket *so = stcb->sctp_socket;
477 
478 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
479 	} else {
480 		sctp_clog.x.wake.sbflags = 0xff;
481 	}
482 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
483 	    SCTP_LOG_EVENT_WAKE,
484 	    from,
485 	    sctp_clog.x.misc.log1,
486 	    sctp_clog.x.misc.log2,
487 	    sctp_clog.x.misc.log3,
488 	    sctp_clog.x.misc.log4);
489 }
490 
491 void
492 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
493 {
494 	struct sctp_cwnd_log sctp_clog;
495 
496 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
497 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
498 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
499 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
500 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
501 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
502 	sctp_clog.x.blk.sndlen = sendlen;
503 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
504 	    SCTP_LOG_EVENT_BLOCK,
505 	    from,
506 	    sctp_clog.x.misc.log1,
507 	    sctp_clog.x.misc.log2,
508 	    sctp_clog.x.misc.log3,
509 	    sctp_clog.x.misc.log4);
510 }
511 
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/*
	 * Stub: statistics are retrieved through the KTR trace buffer
	 * (ktrdump) rather than this sockopt path; always succeeds.
	 */
	/* May need to fix this if ktrdump does not work */
	return (0);
}
518 
519 #ifdef SCTP_AUDITING_ENABLED
520 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
521 static int sctp_audit_indx = 0;
522 
523 static
524 void
525 sctp_print_audit_report(void)
526 {
527 	int i;
528 	int cnt;
529 
530 	cnt = 0;
531 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
532 		if ((sctp_audit_data[i][0] == 0xe0) &&
533 		    (sctp_audit_data[i][1] == 0x01)) {
534 			cnt = 0;
535 			SCTP_PRINTF("\n");
536 		} else if (sctp_audit_data[i][0] == 0xf0) {
537 			cnt = 0;
538 			SCTP_PRINTF("\n");
539 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
540 		    (sctp_audit_data[i][1] == 0x01)) {
541 			SCTP_PRINTF("\n");
542 			cnt = 0;
543 		}
544 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
545 		    (uint32_t) sctp_audit_data[i][1]);
546 		cnt++;
547 		if ((cnt % 14) == 0)
548 			SCTP_PRINTF("\n");
549 	}
550 	for (i = 0; i < sctp_audit_indx; i++) {
551 		if ((sctp_audit_data[i][0] == 0xe0) &&
552 		    (sctp_audit_data[i][1] == 0x01)) {
553 			cnt = 0;
554 			SCTP_PRINTF("\n");
555 		} else if (sctp_audit_data[i][0] == 0xf0) {
556 			cnt = 0;
557 			SCTP_PRINTF("\n");
558 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
559 		    (sctp_audit_data[i][1] == 0x01)) {
560 			SCTP_PRINTF("\n");
561 			cnt = 0;
562 		}
563 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
564 		    (uint32_t) sctp_audit_data[i][1]);
565 		cnt++;
566 		if ((cnt % 14) == 0)
567 			SCTP_PRINTF("\n");
568 	}
569 	SCTP_PRINTF("\n");
570 }
571 
/*
 * Audit the consistency of the association's retransmission and flight
 * accounting against the actual sent queue, repairing any mismatch and
 * printing the audit ring buffer when a repair was needed.  Records
 * progress markers into sctp_audit_data along the way (0xAA = entry,
 * 0xAF = anomaly, 0xA1/0xA2 = retran counts).
 * NOTE(review): the `net` parameter is unused here — presumably kept for
 * call-site symmetry; confirm before removing.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Mark audit entry with the caller's id. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* No endpoint: record anomaly 0x01 and bail. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* No association: record anomaly 0x02 and bail. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount retransmits and in-flight bytes from the sent queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* Retran count drifted: report and correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* Total flight byte count drifted: report and correct. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* In-flight chunk count drifted: report and correct. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check the per-destination flight sizes against the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			/* Rebuild this destination's flight from the queue. */
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		/* Something was repaired: dump the audit trail. */
		sctp_print_audit_report();
	}
}
701 
702 void
703 sctp_audit_log(uint8_t ev, uint8_t fd)
704 {
705 
706 	sctp_audit_data[sctp_audit_indx][0] = ev;
707 	sctp_audit_data[sctp_audit_indx][1] = fd;
708 	sctp_audit_indx++;
709 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
710 		sctp_audit_indx = 0;
711 	}
712 }
713 
714 #endif
715 
716 /*
717  * sctp_stop_timers_for_shutdown() should be called
718  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
719  * state to make sure that all timers are stopped.
720  */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	/* Stop all association-level timers... */
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	/* ...and the per-destination PMTU and heartbeat timers. */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
	}
}
739 
740 /*
741  * a list of sizes based on typical mtu's, used only if next hop size not
742  * returned.
743  */
/*
 * Must stay sorted in ascending order: sctp_get_prev_mtu() and
 * sctp_get_next_mtu() scan it linearly relying on that ordering.
 * Values correspond to common link MTUs (e.g. 68 = minimum IPv4 MTU,
 * 1500 = Ethernet, 65535 = maximum) — presumably chosen from classic
 * media types; confirm before editing.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
764 
765 /*
766  * Return the largest MTU smaller than val. If there is no
767  * entry, just return val.
768  */
769 uint32_t
770 sctp_get_prev_mtu(uint32_t val)
771 {
772 	uint32_t i;
773 
774 	if (val <= sctp_mtu_sizes[0]) {
775 		return (val);
776 	}
777 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
778 		if (val <= sctp_mtu_sizes[i]) {
779 			break;
780 		}
781 	}
782 	return (sctp_mtu_sizes[i - 1]);
783 }
784 
785 /*
786  * Return the smallest MTU larger than val. If there is no
787  * entry, just return val.
788  */
789 uint32_t
790 sctp_get_next_mtu(uint32_t val)
791 {
792 	/* select another MTU that is just bigger than this one */
793 	uint32_t i;
794 
795 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
796 		if (val < sctp_mtu_sizes[i]) {
797 			return (sctp_mtu_sizes[i]);
798 		}
799 	}
800 	return (val);
801 }
802 
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	/* Reset the read cursor so consumers start at the new data. */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* Bump the counter so the next refill hashes different input. */
	m->random_counter++;
}
821 
822 uint32_t
823 sctp_select_initial_TSN(struct sctp_pcb *inp)
824 {
825 	/*
826 	 * A true implementation should use random selection process to get
827 	 * the initial stream sequence number, using RFC1750 as a good
828 	 * guideline
829 	 */
830 	uint32_t x, *xp;
831 	uint8_t *p;
832 	int store_at, new_store;
833 
834 	if (inp->initial_sequence_debug != 0) {
835 		uint32_t ret;
836 
837 		ret = inp->initial_sequence_debug;
838 		inp->initial_sequence_debug++;
839 		return (ret);
840 	}
841 retry:
842 	store_at = inp->store_at;
843 	new_store = store_at + sizeof(uint32_t);
844 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
845 		new_store = 0;
846 	}
847 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
848 		goto retry;
849 	}
850 	if (new_store == 0) {
851 		/* Refill the random store */
852 		sctp_fill_random_store(inp);
853 	}
854 	p = &inp->random_store[store_at];
855 	xp = (uint32_t *) p;
856 	x = *xp;
857 	return (x);
858 }
859 
860 uint32_t
861 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
862 {
863 	uint32_t x;
864 	struct timeval now;
865 
866 	if (check) {
867 		(void)SCTP_GETTIME_TIMEVAL(&now);
868 	}
869 	for (;;) {
870 		x = sctp_select_initial_TSN(&inp->sctp_ep);
871 		if (x == 0) {
872 			/* we never use 0 */
873 			continue;
874 		}
875 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
876 			break;
877 		}
878 	}
879 	return (x);
880 }
881 
882 int
883 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
884     uint32_t override_tag, uint32_t vrf_id)
885 {
886 	struct sctp_association *asoc;
887 
888 	/*
889 	 * Anything set to zero is taken care of by the allocation routine's
890 	 * bzero
891 	 */
892 
893 	/*
894 	 * Up front select what scoping to apply on addresses I tell my peer
895 	 * Not sure what to do with these right now, we will need to come up
896 	 * with a way to set them. We may need to pass them through from the
897 	 * caller in the sctp_aloc_assoc() function.
898 	 */
899 	int i;
900 
901 	asoc = &stcb->asoc;
902 	/* init all variables to a known value. */
903 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
904 	asoc->max_burst = m->sctp_ep.max_burst;
905 	asoc->fr_max_burst = m->sctp_ep.fr_max_burst;
906 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
907 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
908 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
909 	asoc->ecn_allowed = m->sctp_ecn_enable;
910 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
911 	asoc->sctp_cmt_pf = (uint8_t) 0;
912 	asoc->sctp_frag_point = m->sctp_frag_point;
913 	asoc->sctp_features = m->sctp_features;
914 	asoc->default_dscp = m->sctp_ep.default_dscp;
915 #ifdef INET6
916 	if (m->sctp_ep.default_flowlabel) {
917 		asoc->default_flowlabel = m->sctp_ep.default_flowlabel;
918 	} else {
919 		if (m->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
920 			asoc->default_flowlabel = sctp_select_initial_TSN(&m->sctp_ep);
921 			asoc->default_flowlabel &= 0x000fffff;
922 			asoc->default_flowlabel |= 0x80000000;
923 		} else {
924 			asoc->default_flowlabel = 0;
925 		}
926 	}
927 #endif
928 	asoc->sb_send_resv = 0;
929 	if (override_tag) {
930 		asoc->my_vtag = override_tag;
931 	} else {
932 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
933 	}
934 	/* Get the nonce tags */
935 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
936 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
937 	asoc->vrf_id = vrf_id;
938 
939 #ifdef SCTP_ASOCLOG_OF_TSNS
940 	asoc->tsn_in_at = 0;
941 	asoc->tsn_out_at = 0;
942 	asoc->tsn_in_wrapped = 0;
943 	asoc->tsn_out_wrapped = 0;
944 	asoc->cumack_log_at = 0;
945 	asoc->cumack_log_atsnt = 0;
946 #endif
947 #ifdef SCTP_FS_SPEC_LOG
948 	asoc->fs_index = 0;
949 #endif
950 	asoc->refcnt = 0;
951 	asoc->assoc_up_sent = 0;
952 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
953 	    sctp_select_initial_TSN(&m->sctp_ep);
954 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
955 	/* we are optimisitic here */
956 	asoc->peer_supports_pktdrop = 1;
957 	asoc->peer_supports_nat = 0;
958 	asoc->sent_queue_retran_cnt = 0;
959 
960 	/* for CMT */
961 	asoc->last_net_cmt_send_started = NULL;
962 
963 	/* This will need to be adjusted */
964 	asoc->last_acked_seq = asoc->init_seq_number - 1;
965 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
966 	asoc->asconf_seq_in = asoc->last_acked_seq;
967 
968 	/* here we are different, we hold the next one we expect */
969 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
970 
971 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
972 	asoc->initial_rto = m->sctp_ep.initial_rto;
973 
974 	asoc->max_init_times = m->sctp_ep.max_init_times;
975 	asoc->max_send_times = m->sctp_ep.max_send_times;
976 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
977 	asoc->def_net_pf_threshold = m->sctp_ep.def_net_pf_threshold;
978 	asoc->free_chunk_cnt = 0;
979 
980 	asoc->iam_blocking = 0;
981 	asoc->context = m->sctp_context;
982 	asoc->local_strreset_support = m->local_strreset_support;
983 	asoc->def_send = m->def_send;
984 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
985 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
986 	asoc->pr_sctp_cnt = 0;
987 	asoc->total_output_queue_size = 0;
988 
989 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
990 		struct in6pcb *inp6;
991 
992 		/* Its a V6 socket */
993 		inp6 = (struct in6pcb *)m;
994 		asoc->ipv6_addr_legal = 1;
995 		/* Now look at the binding flag to see if V4 will be legal */
996 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
997 			asoc->ipv4_addr_legal = 1;
998 		} else {
999 			/* V4 addresses are NOT legal on the association */
1000 			asoc->ipv4_addr_legal = 0;
1001 		}
1002 	} else {
1003 		/* Its a V4 socket, no - V6 */
1004 		asoc->ipv4_addr_legal = 1;
1005 		asoc->ipv6_addr_legal = 0;
1006 	}
1007 
1008 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1009 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1010 
1011 	asoc->smallest_mtu = m->sctp_frag_point;
1012 	asoc->minrto = m->sctp_ep.sctp_minrto;
1013 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1014 
1015 	asoc->locked_on_sending = NULL;
1016 	asoc->stream_locked_on = 0;
1017 	asoc->ecn_echo_cnt_onq = 0;
1018 	asoc->stream_locked = 0;
1019 
1020 	asoc->send_sack = 1;
1021 
1022 	LIST_INIT(&asoc->sctp_restricted_addrs);
1023 
1024 	TAILQ_INIT(&asoc->nets);
1025 	TAILQ_INIT(&asoc->pending_reply_queue);
1026 	TAILQ_INIT(&asoc->asconf_ack_sent);
1027 	/* Setup to fill the hb random cache at first HB */
1028 	asoc->hb_random_idx = 4;
1029 
1030 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1031 
1032 	stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
1033 	stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
1034 
1035 	stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
1036 	stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
1037 
1038 	/*
1039 	 * Now the stream parameters, here we allocate space for all streams
1040 	 * that we request by default.
1041 	 */
1042 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1043 	    m->sctp_ep.pre_open_stream_count;
1044 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1045 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1046 	    SCTP_M_STRMO);
1047 	if (asoc->strmout == NULL) {
1048 		/* big trouble no memory */
1049 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1050 		return (ENOMEM);
1051 	}
1052 	for (i = 0; i < asoc->streamoutcnt; i++) {
1053 		/*
1054 		 * inbound side must be set to 0xffff, also NOTE when we get
1055 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1056 		 * count (streamoutcnt) but first check if we sent to any of
1057 		 * the upper streams that were dropped (if some were). Those
1058 		 * that were dropped must be notified to the upper layer as
1059 		 * failed to send.
1060 		 */
1061 		asoc->strmout[i].next_sequence_sent = 0x0;
1062 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1063 		asoc->strmout[i].stream_no = i;
1064 		asoc->strmout[i].last_msg_incomplete = 0;
1065 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1066 	}
1067 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1068 
1069 	/* Now the mapping array */
1070 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1071 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1072 	    SCTP_M_MAP);
1073 	if (asoc->mapping_array == NULL) {
1074 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1075 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1076 		return (ENOMEM);
1077 	}
1078 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1079 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1080 	    SCTP_M_MAP);
1081 	if (asoc->nr_mapping_array == NULL) {
1082 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1083 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1084 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1085 		return (ENOMEM);
1086 	}
1087 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1088 
1089 	/* Now the init of the other outqueues */
1090 	TAILQ_INIT(&asoc->free_chunks);
1091 	TAILQ_INIT(&asoc->control_send_queue);
1092 	TAILQ_INIT(&asoc->asconf_send_queue);
1093 	TAILQ_INIT(&asoc->send_queue);
1094 	TAILQ_INIT(&asoc->sent_queue);
1095 	TAILQ_INIT(&asoc->reasmqueue);
1096 	TAILQ_INIT(&asoc->resetHead);
1097 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1098 	TAILQ_INIT(&asoc->asconf_queue);
1099 	/* authentication fields */
1100 	asoc->authinfo.random = NULL;
1101 	asoc->authinfo.active_keyid = 0;
1102 	asoc->authinfo.assoc_key = NULL;
1103 	asoc->authinfo.assoc_keyid = 0;
1104 	asoc->authinfo.recv_key = NULL;
1105 	asoc->authinfo.recv_keyid = 0;
1106 	LIST_INIT(&asoc->shared_keys);
1107 	asoc->marked_retrans = 0;
1108 	asoc->port = m->sctp_ep.port;
1109 	asoc->timoinit = 0;
1110 	asoc->timodata = 0;
1111 	asoc->timosack = 0;
1112 	asoc->timoshutdown = 0;
1113 	asoc->timoheartbeat = 0;
1114 	asoc->timocookie = 0;
1115 	asoc->timoshutdownack = 0;
1116 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1117 	asoc->discontinuity_time = asoc->start_time;
1118 	/*
1119 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1120 	 * freed later when the association is freed.
1121 	 */
1122 	return (0);
1123 }
1124 
1125 void
1126 sctp_print_mapping_array(struct sctp_association *asoc)
1127 {
1128 	unsigned int i, limit;
1129 
1130 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1131 	    asoc->mapping_array_size,
1132 	    asoc->mapping_array_base_tsn,
1133 	    asoc->cumulative_tsn,
1134 	    asoc->highest_tsn_inside_map,
1135 	    asoc->highest_tsn_inside_nr_map);
1136 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1137 		if (asoc->mapping_array[limit - 1] != 0) {
1138 			break;
1139 		}
1140 	}
1141 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1142 	for (i = 0; i < limit; i++) {
1143 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1144 	}
1145 	if (limit % 16)
1146 		printf("\n");
1147 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1148 		if (asoc->nr_mapping_array[limit - 1]) {
1149 			break;
1150 		}
1151 	}
1152 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1153 	for (i = 0; i < limit; i++) {
1154 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1155 	}
1156 	if (limit % 16)
1157 		printf("\n");
1158 }
1159 
1160 int
1161 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1162 {
1163 	/* mapping array needs to grow */
1164 	uint8_t *new_array1, *new_array2;
1165 	uint32_t new_size;
1166 
1167 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1168 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1169 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1170 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1171 		/* can't get more, forget it */
1172 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1173 		if (new_array1) {
1174 			SCTP_FREE(new_array1, SCTP_M_MAP);
1175 		}
1176 		if (new_array2) {
1177 			SCTP_FREE(new_array2, SCTP_M_MAP);
1178 		}
1179 		return (-1);
1180 	}
1181 	memset(new_array1, 0, new_size);
1182 	memset(new_array2, 0, new_size);
1183 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1184 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1185 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1186 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1187 	asoc->mapping_array = new_array1;
1188 	asoc->nr_mapping_array = new_array2;
1189 	asoc->mapping_array_size = new_size;
1190 	return (0);
1191 }
1192 
1193 
/*
 * Execute one iterator: walk the endpoint list (or just it->inp when
 * SCTP_ITERATOR_DO_SINGLE_INP is set), invoking function_inp once per
 * matching endpoint, function_assoc for each association whose state
 * matches it->asoc_state, function_inp_end after an endpoint's last
 * association, and function_atend when the whole walk finishes.  The
 * iterator structure itself is freed on completion.  The INP-info read
 * lock and the iterator lock are held across the walk, with a
 * drop/re-take "pause" every SCTP_ITERATOR_MAX_AT_ONCE associations.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* release the ref that kept this inp alive while queued */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* inp is already read-locked from the code above */
		first_in = 0;
	} else {
		/* coming back from no_stcb: lock was dropped there */
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* endpoint callback asked to skip, or no assocs at all */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/* another thread may have flagged us while unlocked */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				printf("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1341 
1342 void
1343 sctp_iterator_worker(void)
1344 {
1345 	struct sctp_iterator *it, *nit;
1346 
1347 	/* This function is called with the WQ lock in place */
1348 
1349 	sctp_it_ctl.iterator_running = 1;
1350 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1351 		sctp_it_ctl.cur_it = it;
1352 		/* now lets work on this one */
1353 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1354 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1355 		CURVNET_SET(it->vn);
1356 		sctp_iterator_work(it);
1357 		sctp_it_ctl.cur_it = NULL;
1358 		CURVNET_RESTORE();
1359 		SCTP_IPI_ITERATOR_WQ_LOCK();
1360 		/* sa_ignore FREED_MEMORY */
1361 	}
1362 	sctp_it_ctl.iterator_running = 0;
1363 	return;
1364 }
1365 
1366 
1367 static void
1368 sctp_handle_addr_wq(void)
1369 {
1370 	/* deal with the ADDR wq from the rtsock calls */
1371 	struct sctp_laddr *wi, *nwi;
1372 	struct sctp_asconf_iterator *asc;
1373 
1374 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1375 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1376 	if (asc == NULL) {
1377 		/* Try later, no memory */
1378 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1379 		    (struct sctp_inpcb *)NULL,
1380 		    (struct sctp_tcb *)NULL,
1381 		    (struct sctp_nets *)NULL);
1382 		return;
1383 	}
1384 	LIST_INIT(&asc->list_of_work);
1385 	asc->cnt = 0;
1386 
1387 	SCTP_WQ_ADDR_LOCK();
1388 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1389 		LIST_REMOVE(wi, sctp_nxt_addr);
1390 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1391 		asc->cnt++;
1392 	}
1393 	SCTP_WQ_ADDR_UNLOCK();
1394 
1395 	if (asc->cnt == 0) {
1396 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1397 	} else {
1398 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1399 		    sctp_asconf_iterator_stcb,
1400 		    NULL,	/* No ep end for boundall */
1401 		    SCTP_PCB_FLAGS_BOUNDALL,
1402 		    SCTP_PCB_ANY_FEATURES,
1403 		    SCTP_ASOC_ANY_STATE,
1404 		    (void *)asc, 0,
1405 		    sctp_asconf_iterator_end, NULL, 0);
1406 	}
1407 }
1408 
/*
 * Callout handler for all SCTP timer types.  "t" is the firing
 * struct sctp_timer, which carries the endpoint (ep), association
 * (tcb), destination (net) and vnet it was started for.  The handler
 * first runs a series of staleness/validity checks -- each stage is
 * recorded in tmr->stopped_from for post-mortem debugging -- then
 * takes the needed references and locks, dispatches on tmr->type,
 * and for most types pushes any resulting chunks via
 * sctp_chunk_output().
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/* self pointer mismatch: this timer object is stale */
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* every type except ADDR_WQ requires an endpoint */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/* latch the type now; tmr may be freed below (e.g. INPKILL) */
	type = tmr->type;
	if (inp) {
		/*
		 * Hold the endpoint.  If its socket is already gone, only
		 * the timer types listed below may still run on it.
		 */
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* hold the association; bail if its state was cleared */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* timer was stopped in the meantime; drop refs and leave */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/*
		 * Re-check under the TCB lock: only ASOCKILL may run on
		 * an association that is about to be freed.
		 */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped_from what t-o occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1 INIT retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* only rearm if HBs are not disabled on this destination */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* rotate the endpoint's cookie secret keys */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* shutdown took too long: abort the association */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* take the socket lock in the right order for free_assoc */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	/* use "type" latched above: tmr may live in memory freed by now */
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1849 
1850 void
1851 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1852     struct sctp_nets *net)
1853 {
1854 	uint32_t to_ticks;
1855 	struct sctp_timer *tmr;
1856 
1857 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1858 		return;
1859 
1860 	tmr = NULL;
1861 	if (stcb) {
1862 		SCTP_TCB_LOCK_ASSERT(stcb);
1863 	}
1864 	switch (t_type) {
1865 	case SCTP_TIMER_TYPE_ZERO_COPY:
1866 		tmr = &inp->sctp_ep.zero_copy_timer;
1867 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1868 		break;
1869 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1870 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1871 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1872 		break;
1873 	case SCTP_TIMER_TYPE_ADDR_WQ:
1874 		/* Only 1 tick away :-) */
1875 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1876 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1877 		break;
1878 	case SCTP_TIMER_TYPE_SEND:
1879 		/* Here we use the RTO timer */
1880 		{
1881 			int rto_val;
1882 
1883 			if ((stcb == NULL) || (net == NULL)) {
1884 				return;
1885 			}
1886 			tmr = &net->rxt_timer;
1887 			if (net->RTO == 0) {
1888 				rto_val = stcb->asoc.initial_rto;
1889 			} else {
1890 				rto_val = net->RTO;
1891 			}
1892 			to_ticks = MSEC_TO_TICKS(rto_val);
1893 		}
1894 		break;
1895 	case SCTP_TIMER_TYPE_INIT:
1896 		/*
1897 		 * Here we use the INIT timer default usually about 1
1898 		 * minute.
1899 		 */
1900 		if ((stcb == NULL) || (net == NULL)) {
1901 			return;
1902 		}
1903 		tmr = &net->rxt_timer;
1904 		if (net->RTO == 0) {
1905 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1906 		} else {
1907 			to_ticks = MSEC_TO_TICKS(net->RTO);
1908 		}
1909 		break;
1910 	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * usually about 200ms.
		 */
1915 		if (stcb == NULL) {
1916 			return;
1917 		}
1918 		tmr = &stcb->asoc.dack_timer;
1919 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1920 		break;
1921 	case SCTP_TIMER_TYPE_SHUTDOWN:
1922 		/* Here we use the RTO of the destination. */
1923 		if ((stcb == NULL) || (net == NULL)) {
1924 			return;
1925 		}
1926 		if (net->RTO == 0) {
1927 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1928 		} else {
1929 			to_ticks = MSEC_TO_TICKS(net->RTO);
1930 		}
1931 		tmr = &net->rxt_timer;
1932 		break;
1933 	case SCTP_TIMER_TYPE_HEARTBEAT:
1934 		/*
1935 		 * the net is used here so that we can add in the RTO. Even
1936 		 * though we use a different timer. We also add the HB timer
1937 		 * PLUS a random jitter.
1938 		 */
1939 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1940 			return;
1941 		} else {
1942 			uint32_t rndval;
1943 			uint32_t jitter;
1944 
1945 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1946 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1947 				return;
1948 			}
1949 			if (net->RTO == 0) {
1950 				to_ticks = stcb->asoc.initial_rto;
1951 			} else {
1952 				to_ticks = net->RTO;
1953 			}
1954 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1955 			jitter = rndval % to_ticks;
1956 			if (jitter >= (to_ticks >> 1)) {
1957 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1958 			} else {
1959 				to_ticks = to_ticks - jitter;
1960 			}
1961 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1962 			    !(net->dest_state & SCTP_ADDR_PF)) {
1963 				to_ticks += net->heart_beat_delay;
1964 			}
1965 			/*
1966 			 * Now we must convert the to_ticks that are now in
1967 			 * ms to ticks.
1968 			 */
1969 			to_ticks = MSEC_TO_TICKS(to_ticks);
1970 			tmr = &net->hb_timer;
1971 		}
1972 		break;
1973 	case SCTP_TIMER_TYPE_COOKIE:
1974 		/*
1975 		 * Here we can use the RTO timer from the network since one
1976 		 * RTT was compelete. If a retran happened then we will be
1977 		 * using the RTO initial value.
1978 		 */
1979 		if ((stcb == NULL) || (net == NULL)) {
1980 			return;
1981 		}
1982 		if (net->RTO == 0) {
1983 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1984 		} else {
1985 			to_ticks = MSEC_TO_TICKS(net->RTO);
1986 		}
1987 		tmr = &net->rxt_timer;
1988 		break;
1989 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1990 		/*
1991 		 * nothing needed but the endpoint here ususually about 60
1992 		 * minutes.
1993 		 */
1994 		if (inp == NULL) {
1995 			return;
1996 		}
1997 		tmr = &inp->sctp_ep.signature_change;
1998 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1999 		break;
2000 	case SCTP_TIMER_TYPE_ASOCKILL:
2001 		if (stcb == NULL) {
2002 			return;
2003 		}
2004 		tmr = &stcb->asoc.strreset_timer;
2005 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2006 		break;
2007 	case SCTP_TIMER_TYPE_INPKILL:
2008 		/*
2009 		 * The inp is setup to die. We re-use the signature_chage
2010 		 * timer since that has stopped and we are in the GONE
2011 		 * state.
2012 		 */
2013 		if (inp == NULL) {
2014 			return;
2015 		}
2016 		tmr = &inp->sctp_ep.signature_change;
2017 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2018 		break;
2019 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2020 		/*
2021 		 * Here we use the value found in the EP for PMTU ususually
2022 		 * about 10 minutes.
2023 		 */
2024 		if ((stcb == NULL) || (inp == NULL)) {
2025 			return;
2026 		}
2027 		if (net == NULL) {
2028 			return;
2029 		}
2030 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2031 			return;
2032 		}
2033 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2034 		tmr = &net->pmtu_timer;
2035 		break;
2036 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2037 		/* Here we use the RTO of the destination */
2038 		if ((stcb == NULL) || (net == NULL)) {
2039 			return;
2040 		}
2041 		if (net->RTO == 0) {
2042 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2043 		} else {
2044 			to_ticks = MSEC_TO_TICKS(net->RTO);
2045 		}
2046 		tmr = &net->rxt_timer;
2047 		break;
2048 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2049 		/*
2050 		 * Here we use the endpoints shutdown guard timer usually
2051 		 * about 3 minutes.
2052 		 */
2053 		if ((inp == NULL) || (stcb == NULL)) {
2054 			return;
2055 		}
2056 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2057 		tmr = &stcb->asoc.shut_guard_timer;
2058 		break;
2059 	case SCTP_TIMER_TYPE_STRRESET:
2060 		/*
2061 		 * Here the timer comes from the stcb but its value is from
2062 		 * the net's RTO.
2063 		 */
2064 		if ((stcb == NULL) || (net == NULL)) {
2065 			return;
2066 		}
2067 		if (net->RTO == 0) {
2068 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2069 		} else {
2070 			to_ticks = MSEC_TO_TICKS(net->RTO);
2071 		}
2072 		tmr = &stcb->asoc.strreset_timer;
2073 		break;
2074 	case SCTP_TIMER_TYPE_ASCONF:
2075 		/*
2076 		 * Here the timer comes from the stcb but its value is from
2077 		 * the net's RTO.
2078 		 */
2079 		if ((stcb == NULL) || (net == NULL)) {
2080 			return;
2081 		}
2082 		if (net->RTO == 0) {
2083 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2084 		} else {
2085 			to_ticks = MSEC_TO_TICKS(net->RTO);
2086 		}
2087 		tmr = &stcb->asoc.asconf_timer;
2088 		break;
2089 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2090 		if ((stcb == NULL) || (net != NULL)) {
2091 			return;
2092 		}
2093 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2094 		tmr = &stcb->asoc.delete_prim_timer;
2095 		break;
2096 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2097 		if (stcb == NULL) {
2098 			return;
2099 		}
2100 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2101 			/*
2102 			 * Really an error since stcb is NOT set to
2103 			 * autoclose
2104 			 */
2105 			return;
2106 		}
2107 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2108 		tmr = &stcb->asoc.autoclose_timer;
2109 		break;
2110 	default:
2111 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2112 		    __FUNCTION__, t_type);
2113 		return;
2114 		break;
2115 	}
2116 	if ((to_ticks <= 0) || (tmr == NULL)) {
2117 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2118 		    __FUNCTION__, t_type, to_ticks, tmr);
2119 		return;
2120 	}
2121 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2122 		/*
2123 		 * we do NOT allow you to have it already running. if it is
2124 		 * we leave the current one up unchanged
2125 		 */
2126 		return;
2127 	}
2128 	/* At this point we can proceed */
2129 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2130 		stcb->asoc.num_send_timers_up++;
2131 	}
2132 	tmr->stopped_from = 0;
2133 	tmr->type = t_type;
2134 	tmr->ep = (void *)inp;
2135 	tmr->tcb = (void *)stcb;
2136 	tmr->net = (void *)net;
2137 	tmr->self = (void *)tmr;
2138 	tmr->vnet = (void *)curvnet;
2139 	tmr->ticks = sctp_get_tick_count();
2140 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2141 	return;
2142 }
2143 
/*
 * Stop (cancel) the timer of type 't_type' belonging to the given
 * endpoint/association/destination.  'from' is a caller-location code
 * recorded in tmr->stopped_from for post-mortem debugging.  The routine
 * silently returns when the inp/stcb/net argument required by the timer
 * type is missing, or when the timer structure is currently carrying a
 * different timer type (joint-use timers, see below).
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Every timer type except ADDR_WQ requires a valid endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the sctp_timer structure that carries it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.  Note this shares
		 * stcb->asoc.strreset_timer with the STRRESET type.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Keep the outstanding-send-timer count consistent. */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2300 
2301 uint32_t
2302 sctp_calculate_len(struct mbuf *m)
2303 {
2304 	uint32_t tlen = 0;
2305 	struct mbuf *at;
2306 
2307 	at = m;
2308 	while (at) {
2309 		tlen += SCTP_BUF_LEN(at);
2310 		at = SCTP_BUF_NEXT(at);
2311 	}
2312 	return (tlen);
2313 }
2314 
2315 void
2316 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2317     struct sctp_association *asoc, uint32_t mtu)
2318 {
2319 	/*
2320 	 * Reset the P-MTU size on this association, this involves changing
2321 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2322 	 * allow the DF flag to be cleared.
2323 	 */
2324 	struct sctp_tmit_chunk *chk;
2325 	unsigned int eff_mtu, ovh;
2326 
2327 	asoc->smallest_mtu = mtu;
2328 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2329 		ovh = SCTP_MIN_OVERHEAD;
2330 	} else {
2331 		ovh = SCTP_MIN_V4_OVERHEAD;
2332 	}
2333 	eff_mtu = mtu - ovh;
2334 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2335 		if (chk->send_size > eff_mtu) {
2336 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2337 		}
2338 	}
2339 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2340 		if (chk->send_size > eff_mtu) {
2341 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2342 		}
2343 	}
2344 }
2345 
2346 
2347 /*
2348  * given an association and starting time of the current RTT period return
2349  * RTO in number of msecs net should point to the current network
2350  */
2351 
2352 uint32_t
2353 sctp_calculate_rto(struct sctp_tcb *stcb,
2354     struct sctp_association *asoc,
2355     struct sctp_nets *net,
2356     struct timeval *told,
2357     int safe, int rtt_from_sack)
2358 {
2359 	/*-
2360 	 * given an association and the starting time of the current RTT
2361 	 * period (in value1/value2) return RTO in number of msecs.
2362 	 */
2363 	int32_t rtt;		/* RTT in ms */
2364 	uint32_t new_rto;
2365 	int first_measure = 0;
2366 	struct timeval now, then, *old;
2367 
2368 	/* Copy it out for sparc64 */
2369 	if (safe == sctp_align_unsafe_makecopy) {
2370 		old = &then;
2371 		memcpy(&then, told, sizeof(struct timeval));
2372 	} else if (safe == sctp_align_safe_nocopy) {
2373 		old = told;
2374 	} else {
2375 		/* error */
2376 		SCTP_PRINTF("Huh, bad rto calc call\n");
2377 		return (0);
2378 	}
2379 	/************************/
2380 	/* 1. calculate new RTT */
2381 	/************************/
2382 	/* get the current time */
2383 	if (stcb->asoc.use_precise_time) {
2384 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2385 	} else {
2386 		(void)SCTP_GETTIME_TIMEVAL(&now);
2387 	}
2388 	timevalsub(&now, old);
2389 	/* store the current RTT in us */
2390 	net->rtt = (uint64_t) 10000000 *(uint64_t) now.tv_sec +
2391 	         (uint64_t) now.tv_usec;
2392 
2393 	/* computer rtt in ms */
2394 	rtt = net->rtt / 1000;
2395 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2396 		/*
2397 		 * Tell the CC module that a new update has just occurred
2398 		 * from a sack
2399 		 */
2400 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2401 	}
2402 	/*
2403 	 * Do we need to determine the lan? We do this only on sacks i.e.
2404 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2405 	 */
2406 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2407 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2408 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2409 			net->lan_type = SCTP_LAN_INTERNET;
2410 		} else {
2411 			net->lan_type = SCTP_LAN_LOCAL;
2412 		}
2413 	}
2414 	/***************************/
2415 	/* 2. update RTTVAR & SRTT */
2416 	/***************************/
2417 	/*-
2418 	 * Compute the scaled average lastsa and the
2419 	 * scaled variance lastsv as described in van Jacobson
2420 	 * Paper "Congestion Avoidance and Control", Annex A.
2421 	 *
2422 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2423 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2424 	 */
2425 	if (net->RTO_measured) {
2426 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2427 		net->lastsa += rtt;
2428 		if (rtt < 0) {
2429 			rtt = -rtt;
2430 		}
2431 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2432 		net->lastsv += rtt;
2433 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2434 			rto_logging(net, SCTP_LOG_RTTVAR);
2435 		}
2436 	} else {
2437 		/* First RTO measurment */
2438 		net->RTO_measured = 1;
2439 		first_measure = 1;
2440 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2441 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2442 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2443 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2444 		}
2445 	}
2446 	if (net->lastsv == 0) {
2447 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2448 	}
2449 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2450 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2451 	    (stcb->asoc.sat_network_lockout == 0)) {
2452 		stcb->asoc.sat_network = 1;
2453 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2454 		stcb->asoc.sat_network = 0;
2455 		stcb->asoc.sat_network_lockout = 1;
2456 	}
2457 	/* bound it, per C6/C7 in Section 5.3.1 */
2458 	if (new_rto < stcb->asoc.minrto) {
2459 		new_rto = stcb->asoc.minrto;
2460 	}
2461 	if (new_rto > stcb->asoc.maxrto) {
2462 		new_rto = stcb->asoc.maxrto;
2463 	}
2464 	/* we are now returning the RTO */
2465 	return (new_rto);
2466 }
2467 
2468 /*
2469  * return a pointer to a contiguous piece of data from the given mbuf chain
2470  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2471  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2472  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2473  */
2474 caddr_t
2475 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2476 {
2477 	uint32_t count;
2478 	uint8_t *ptr;
2479 
2480 	ptr = in_ptr;
2481 	if ((off < 0) || (len <= 0))
2482 		return (NULL);
2483 
2484 	/* find the desired start location */
2485 	while ((m != NULL) && (off > 0)) {
2486 		if (off < SCTP_BUF_LEN(m))
2487 			break;
2488 		off -= SCTP_BUF_LEN(m);
2489 		m = SCTP_BUF_NEXT(m);
2490 	}
2491 	if (m == NULL)
2492 		return (NULL);
2493 
2494 	/* is the current mbuf large enough (eg. contiguous)? */
2495 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2496 		return (mtod(m, caddr_t)+off);
2497 	} else {
2498 		/* else, it spans more than one mbuf, so save a temp copy... */
2499 		while ((m != NULL) && (len > 0)) {
2500 			count = min(SCTP_BUF_LEN(m) - off, len);
2501 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2502 			len -= count;
2503 			ptr += count;
2504 			off = 0;
2505 			m = SCTP_BUF_NEXT(m);
2506 		}
2507 		if ((m == NULL) && (len > 0))
2508 			return (NULL);
2509 		else
2510 			return ((caddr_t)in_ptr);
2511 	}
2512 }
2513 
2514 
2515 
2516 struct sctp_paramhdr *
2517 sctp_get_next_param(struct mbuf *m,
2518     int offset,
2519     struct sctp_paramhdr *pull,
2520     int pull_limit)
2521 {
2522 	/* This just provides a typed signature to Peter's Pull routine */
2523 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2524 	    (uint8_t *) pull));
2525 }
2526 
2527 
2528 int
2529 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2530 {
2531 	/*
2532 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2533 	 * padlen is > 3 this routine will fail.
2534 	 */
2535 	uint8_t *dp;
2536 	int i;
2537 
2538 	if (padlen > 3) {
2539 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2540 		return (ENOBUFS);
2541 	}
2542 	if (padlen <= M_TRAILINGSPACE(m)) {
2543 		/*
2544 		 * The easy way. We hope the majority of the time we hit
2545 		 * here :)
2546 		 */
2547 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2548 		SCTP_BUF_LEN(m) += padlen;
2549 	} else {
2550 		/* Hard way we must grow the mbuf */
2551 		struct mbuf *tmp;
2552 
2553 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2554 		if (tmp == NULL) {
2555 			/* Out of space GAK! we are in big trouble. */
2556 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2557 			return (ENOSPC);
2558 		}
2559 		/* setup and insert in middle */
2560 		SCTP_BUF_LEN(tmp) = padlen;
2561 		SCTP_BUF_NEXT(tmp) = NULL;
2562 		SCTP_BUF_NEXT(m) = tmp;
2563 		dp = mtod(tmp, uint8_t *);
2564 	}
2565 	/* zero out the pad */
2566 	for (i = 0; i < padlen; i++) {
2567 		*dp = 0;
2568 		dp++;
2569 	}
2570 	return (0);
2571 }
2572 
2573 int
2574 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2575 {
2576 	/* find the last mbuf in chain and pad it */
2577 	struct mbuf *m_at;
2578 
2579 	m_at = m;
2580 	if (last_mbuf) {
2581 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2582 	} else {
2583 		while (m_at) {
2584 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2585 				return (sctp_add_pad_tombuf(m_at, padval));
2586 			}
2587 			m_at = SCTP_BUF_NEXT(m_at);
2588 		}
2589 	}
2590 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2591 	return (EFAULT);
2592 }
2593 
/*
 * Queue an SCTP_ASSOC_CHANGE notification (state 'event', cause 'error')
 * onto the socket's read queue.  For TCP-model/connected sockets a lost
 * or failed association additionally sets so_error and wakes any
 * sleepers.  'so_locked' tells us whether the caller already holds the
 * socket lock (only meaningful under __APPLE__/SCTP_SO_LOCK_TESTING).
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		/* Failure during setup vs. an established assoc going away. */
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock-order dance: hold a refcount on the assoc,
			 * drop the TCB lock, take the socket lock, retake
			 * the TCB lock, then recheck the socket is still
			 * open before touching it.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		socantrcvmore(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Fill in the notification record. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* same refcount/lock-order dance as above */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2711 
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for peer address 'sa'
 * transitioning to 'state' (with cause 'error') onto the socket's read
 * queue.  No-op if the peer-address event is not enabled on the socket.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address, normalizing any IPv6 scope for user visibility. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
2789 
2790 
/*
 * Queue an SCTP_SEND_FAILED notification for an unsent/unacked chunk.
 * Steals chk->data (after trimming the SCTP DATA chunk header) and hands
 * it to the notification so the user gets the payload back.  'error'
 * distinguishes SCTP_NOTIFY_DATAGRAM_UNSENT from already-sent data.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* reported length excludes the DATA chunk header trimmed below */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2873 
2874 
/*
 * Queue an SCTP_SEND_FAILED notification for a stream-queue pending
 * message (one that never became a transmit chunk).  Steals sp->data and
 * attaches it to the notification so the user gets the payload back.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* no chunk header to subtract here — sp holds raw user data */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* part of the message already went out as fragments */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
2948 
2949 
2950 
2951 static void
2952 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
2953 {
2954 	struct mbuf *m_notify;
2955 	struct sctp_adaptation_event *sai;
2956 	struct sctp_queued_to_read *control;
2957 
2958 	if ((stcb == NULL) ||
2959 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
2960 		/* event not enabled */
2961 		return;
2962 	}
2963 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
2964 	if (m_notify == NULL)
2965 		/* no space left */
2966 		return;
2967 	SCTP_BUF_LEN(m_notify) = 0;
2968 	sai = mtod(m_notify, struct sctp_adaptation_event *);
2969 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
2970 	sai->sai_flags = 0;
2971 	sai->sai_length = sizeof(struct sctp_adaptation_event);
2972 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
2973 	sai->sai_assoc_id = sctp_get_associd(stcb);
2974 
2975 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
2976 	SCTP_BUF_NEXT(m_notify) = NULL;
2977 
2978 	/* append to socket */
2979 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2980 	    0, 0, stcb->asoc.context, 0, 0, 0,
2981 	    m_notify);
2982 	if (control == NULL) {
2983 		/* no memory */
2984 		sctp_m_freem(m_notify);
2985 		return;
2986 	}
2987 	control->length = SCTP_BUF_LEN(m_notify);
2988 	control->spec_flags = M_NOTIFICATION;
2989 	/* not that we need this */
2990 	control->tail_mbuf = m_notify;
2991 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2992 	    control,
2993 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2994 }
2995 
2996 /* This always must be called with the read-queue LOCKED in the INP */
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification.  Unlike the other
 * notify helpers, this one splices the readq entry into the inp's
 * read_queue by hand (directly after asoc.control_pdapi when present)
 * instead of going through sctp_add_to_readq(); the caller must hold the
 * inp read-queue lock (see the comment preceding this function).
 *
 * 'error' is the pdapi indication code; 'val' packs the affected stream
 * in its upper 16 bits and the sequence number in its lower 16 bits.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader side of the socket is gone; nobody to notify. */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* val encodes stream (high 16 bits) and sequence (low 16 bits) */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	/*
	 * length is zeroed here and rebuilt via the atomic_add_int() below,
	 * after the socket-buffer accounting has been charged.
	 */
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* charge the notification against the receive socket buffer */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	/* place the event right behind the partially delivered message */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock ordering: take a refcount, drop the TCB lock,
			 * grab the socket lock, then re-take the TCB lock.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket went away while we slept */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3090 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification after the peer has shut the
 * association down.  For one-to-one style sockets (and one-to-many
 * sockets in the TCP pool) the socket is additionally marked as unable
 * to send.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Lock ordering: hold a refcount and drop the TCB lock
		 * while acquiring the socket lock.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket was closed while the TCB lock was dropped */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3158 
3159 static void
3160 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3161     int so_locked
3162 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3163     SCTP_UNUSED
3164 #endif
3165 )
3166 {
3167 	struct mbuf *m_notify;
3168 	struct sctp_sender_dry_event *event;
3169 	struct sctp_queued_to_read *control;
3170 
3171 	if ((stcb == NULL) ||
3172 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3173 		/* event not enabled */
3174 		return;
3175 	}
3176 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3177 	if (m_notify == NULL) {
3178 		/* no space left */
3179 		return;
3180 	}
3181 	SCTP_BUF_LEN(m_notify) = 0;
3182 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3183 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3184 	event->sender_dry_flags = 0;
3185 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3186 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3187 
3188 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3189 	SCTP_BUF_NEXT(m_notify) = NULL;
3190 
3191 	/* append to socket */
3192 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3193 	    0, 0, stcb->asoc.context, 0, 0, 0,
3194 	    m_notify);
3195 	if (control == NULL) {
3196 		/* no memory */
3197 		sctp_m_freem(m_notify);
3198 		return;
3199 	}
3200 	control->length = SCTP_BUF_LEN(m_notify);
3201 	control->spec_flags = M_NOTIFICATION;
3202 	/* not that we need this */
3203 	control->tail_mbuf = m_notify;
3204 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3205 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3206 }
3207 
3208 
/*
 * Deliver an SCTP_STREAM_CHANGE_EVENT notification reporting the new
 * number of inbound/outbound streams after a stream add/change.  'flag'
 * carries the strchange_flags to report.  The event is suppressed when
 * the local peer itself requested the change (peer_req_out set) and
 * 'flag' is non-zero.
 */
void
sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_change_event *stradd;
	int len;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
		/* event not enabled */
		return;
	}
	if ((stcb->asoc.peer_req_out) && flag) {
		/* Peer made the request, don't tell the local user */
		stcb->asoc.peer_req_out = 0;
		return;
	}
	stcb->asoc.peer_req_out = 0;
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	len = sizeof(struct sctp_stream_change_event);
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	stradd = mtod(m_notify, struct sctp_stream_change_event *);
	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
	stradd->strchange_flags = flag;
	stradd->strchange_length = len;
	stradd->strchange_assoc_id = sctp_get_associd(stcb);
	stradd->strchange_instrms = numberin;
	stradd->strchange_outstrms = numberout;
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* drop the event rather than overflow the receive buffer */
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3269 
3270 void
3271 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3272 {
3273 	struct mbuf *m_notify;
3274 	struct sctp_queued_to_read *control;
3275 	struct sctp_assoc_reset_event *strasoc;
3276 	int len;
3277 
3278 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3279 		/* event not enabled */
3280 		return;
3281 	}
3282 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3283 	if (m_notify == NULL)
3284 		/* no space left */
3285 		return;
3286 	SCTP_BUF_LEN(m_notify) = 0;
3287 	len = sizeof(struct sctp_assoc_reset_event);
3288 	if (len > M_TRAILINGSPACE(m_notify)) {
3289 		/* never enough room */
3290 		sctp_m_freem(m_notify);
3291 		return;
3292 	}
3293 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3294 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3295 	strasoc->assocreset_flags = flag;
3296 	strasoc->assocreset_length = len;
3297 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3298 	strasoc->assocreset_local_tsn = sending_tsn;
3299 	strasoc->assocreset_remote_tsn = recv_tsn;
3300 	SCTP_BUF_LEN(m_notify) = len;
3301 	SCTP_BUF_NEXT(m_notify) = NULL;
3302 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3303 		/* no space */
3304 		sctp_m_freem(m_notify);
3305 		return;
3306 	}
3307 	/* append to socket */
3308 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3309 	    0, 0, stcb->asoc.context, 0, 0, 0,
3310 	    m_notify);
3311 	if (control == NULL) {
3312 		/* no memory */
3313 		sctp_m_freem(m_notify);
3314 		return;
3315 	}
3316 	control->spec_flags = M_NOTIFICATION;
3317 	control->length = SCTP_BUF_LEN(m_notify);
3318 	/* not that we need this */
3319 	control->tail_mbuf = m_notify;
3320 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3321 	    control,
3322 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3323 }
3324 
3325 
3326 
/*
 * Deliver an SCTP_STREAM_RESET_EVENT notification listing the streams
 * that were reset.  'number_entries' is the count of stream ids in
 * 'list'; 'flag' carries the strreset_flags to report.
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	/* event header plus one uint16_t per reset stream */
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	strreset->strreset_flags = flag;
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		/*
		 * NOTE(review): entries are converted with ntohs() here,
		 * i.e. 'list' is assumed to be in network byte order —
		 * confirm against the callers passing the stream list.
		 */
		for (i = 0; i < number_entries; i++) {
			strreset->strreset_stream_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* drop the event rather than overflow the receive buffer */
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3387 
3388 
3389 void
3390 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3391     uint32_t error, void *data, int so_locked
3392 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3393     SCTP_UNUSED
3394 #endif
3395 )
3396 {
3397 	if ((stcb == NULL) ||
3398 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3399 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3400 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3401 		/* If the socket is gone we are out of here */
3402 		return;
3403 	}
3404 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3405 		return;
3406 	}
3407 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3408 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3409 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3410 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3411 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3412 			/* Don't report these in front states */
3413 			return;
3414 		}
3415 	}
3416 	switch (notification) {
3417 	case SCTP_NOTIFY_ASSOC_UP:
3418 		if (stcb->asoc.assoc_up_sent == 0) {
3419 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, so_locked);
3420 			stcb->asoc.assoc_up_sent = 1;
3421 		}
3422 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3423 			sctp_notify_adaptation_layer(stcb);
3424 		}
3425 		if (stcb->asoc.peer_supports_auth == 0) {
3426 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3427 			    NULL, so_locked);
3428 		}
3429 		break;
3430 	case SCTP_NOTIFY_ASSOC_DOWN:
3431 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, so_locked);
3432 		break;
3433 	case SCTP_NOTIFY_INTERFACE_DOWN:
3434 		{
3435 			struct sctp_nets *net;
3436 
3437 			net = (struct sctp_nets *)data;
3438 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3439 			    (struct sockaddr *)&net->ro._l_addr, error);
3440 			break;
3441 		}
3442 	case SCTP_NOTIFY_INTERFACE_UP:
3443 		{
3444 			struct sctp_nets *net;
3445 
3446 			net = (struct sctp_nets *)data;
3447 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3448 			    (struct sockaddr *)&net->ro._l_addr, error);
3449 			break;
3450 		}
3451 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3452 		{
3453 			struct sctp_nets *net;
3454 
3455 			net = (struct sctp_nets *)data;
3456 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3457 			    (struct sockaddr *)&net->ro._l_addr, error);
3458 			break;
3459 		}
3460 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3461 		sctp_notify_send_failed2(stcb, error,
3462 		    (struct sctp_stream_queue_pending *)data, so_locked);
3463 		break;
3464 	case SCTP_NOTIFY_DG_FAIL:
3465 		sctp_notify_send_failed(stcb, error,
3466 		    (struct sctp_tmit_chunk *)data, so_locked);
3467 		break;
3468 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3469 		{
3470 			uint32_t val;
3471 
3472 			val = *((uint32_t *) data);
3473 
3474 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3475 			break;
3476 		}
3477 	case SCTP_NOTIFY_STRDATA_ERR:
3478 		break;
3479 	case SCTP_NOTIFY_ASSOC_ABORTED:
3480 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3481 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3482 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, so_locked);
3483 		} else {
3484 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, so_locked);
3485 		}
3486 		break;
3487 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3488 		break;
3489 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3490 		break;
3491 	case SCTP_NOTIFY_ASSOC_RESTART:
3492 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, so_locked);
3493 		if (stcb->asoc.peer_supports_auth == 0) {
3494 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3495 			    NULL, so_locked);
3496 		}
3497 		break;
3498 	case SCTP_NOTIFY_HB_RESP:
3499 		break;
3500 	case SCTP_NOTIFY_STR_RESET_SEND:
3501 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3502 		break;
3503 	case SCTP_NOTIFY_STR_RESET_RECV:
3504 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING);
3505 		break;
3506 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3507 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3508 		    (SCTP_STREAM_RESET_OUTGOING | SCTP_STREAM_RESET_INCOMING));
3509 		break;
3510 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3511 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3512 		    (SCTP_STREAM_RESET_OUTGOING | SCTP_STREAM_RESET_INCOMING));
3513 		break;
3514 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3515 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3516 		    error);
3517 		break;
3518 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3519 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3520 		    error);
3521 		break;
3522 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3523 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3524 		    error);
3525 		break;
3526 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3527 		break;
3528 	case SCTP_NOTIFY_ASCONF_FAILED:
3529 		break;
3530 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3531 		sctp_notify_shutdown_event(stcb);
3532 		break;
3533 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3534 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3535 		    (uint16_t) (uintptr_t) data,
3536 		    so_locked);
3537 		break;
3538 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3539 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3540 		    (uint16_t) (uintptr_t) data,
3541 		    so_locked);
3542 		break;
3543 	case SCTP_NOTIFY_NO_PEER_AUTH:
3544 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3545 		    (uint16_t) (uintptr_t) data,
3546 		    so_locked);
3547 		break;
3548 	case SCTP_NOTIFY_SENDER_DRY:
3549 		sctp_notify_sender_dry_event(stcb, so_locked);
3550 		break;
3551 	default:
3552 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3553 		    __FUNCTION__, notification, notification);
3554 		break;
3555 	}			/* end switch */
3556 }
3557 
/*
 * Drain every outbound queue of the association (sent queue, send queue
 * and each stream's output queue), notifying the ULP of each failed
 * datagram (SENT vs UNSENT as appropriate) and freeing the chunks.
 * 'holds_lock' indicates whether the caller already holds the TCB send
 * lock; if not, it is taken here.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket is gone; nobody to report to */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* already-sent data: report as DATAGRAM_SENT */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
			/* re-check: the notify may have consumed the data */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* never-sent data: report as DATAGRAM_UNSENT */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3651 
3652 void
3653 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3654 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3655     SCTP_UNUSED
3656 #endif
3657 )
3658 {
3659 	if (stcb == NULL) {
3660 		return;
3661 	}
3662 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3663 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3664 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3665 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3666 	}
3667 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3668 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3669 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3670 		return;
3671 	}
3672 	/* Tell them we lost the asoc */
3673 	sctp_report_all_outbound(stcb, 1, so_locked);
3674 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3675 }
3676 
/*
 * Abort an association in response to an incoming packet: notify the
 * ULP (if a TCB exists), send an ABORT to the peer, and free the TCB.
 * With stcb == NULL only the ABORT is sent (using vtag 0).
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock ordering: hold a refcount and drop the TCB lock
		 * while acquiring the socket lock, then re-take it.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* established associations also decrement the gauge */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3720 
3721 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the per-association inbound and outbound TSN logs
 * (circular buffers in_tsnlog/out_tsnlog, each SCTP_TSN_LOG_SIZE deep).
 *
 * NOTE(review): the guard below reads "NOSIY_PRINTS", which looks like a
 * typo of "NOISY_PRINTS" — as written the body compiles away unless
 * NOSIY_PRINTS is defined somewhere; confirm intent before renaming.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* if wrapped, the oldest entries live from tsn_in_at to the end */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	/* then the newer entries from the start up to tsn_in_at */
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3782 
3783 #endif
3784 
/*
 * Abort an existing association from the local side: notify the ULP,
 * send an ABORT chunk (with optional cause 'op_err') to the peer,
 * update statistics, and free the TCB.  With stcb == NULL, only cleans
 * up the inp if its socket is already gone.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last association gone: finish freeing inp */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* established associations also decrement the gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock ordering: hold a refcount and drop the TCB lock while
	 * acquiring the socket lock, then re-take it.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3847 
/*
 * Handle an out-of-the-blue (OOTB) packet, i.e. one that matches no
 * existing association.  Walks the chunks to decide the proper response:
 * some chunk types get no reply, SHUTDOWN-ACK gets a SHUTDOWN-COMPLETE,
 * and everything else gets an ABORT (subject to the blackhole sysctl).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	/* If the endpoint is going away and has no asocs left, free it now. */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* remembered so the blackhole sysctl can exempt INITs */
			contains_init_chunk = 1;
			break;
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(m, sh, vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/* blackhole=0: always abort; blackhole=1: abort unless INIT seen */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
	}
}
3908 
3909 /*
3910  * check the inbound datagram to make sure there is not an abort inside it,
3911  * if there is return 1, else return 0.
3912  */
3913 int
3914 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3915 {
3916 	struct sctp_chunkhdr *ch;
3917 	struct sctp_init_chunk *init_chk, chunk_buf;
3918 	int offset;
3919 	unsigned int chk_length;
3920 
3921 	offset = iphlen + sizeof(struct sctphdr);
3922 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3923 	    (uint8_t *) & chunk_buf);
3924 	while (ch != NULL) {
3925 		chk_length = ntohs(ch->chunk_length);
3926 		if (chk_length < sizeof(*ch)) {
3927 			/* packet is probably corrupt */
3928 			break;
3929 		}
3930 		/* we seem to be ok, is it an abort? */
3931 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
3932 			/* yep, tell them */
3933 			return (1);
3934 		}
3935 		if (ch->chunk_type == SCTP_INITIATION) {
3936 			/* need to update the Vtag */
3937 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
3938 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
3939 			if (init_chk != NULL) {
3940 				*vtagfill = ntohl(init_chk->init.initiate_tag);
3941 			}
3942 		}
3943 		/* Nope, move to the next chunk */
3944 		offset += SCTP_SIZE32(chk_length);
3945 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3946 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3947 	}
3948 	return (0);
3949 }
3950 
3951 /*
3952  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
3953  * set (i.e. it's 0) so, create this function to compare link local scopes
3954  */
3955 #ifdef INET6
3956 uint32_t
3957 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
3958 {
3959 	struct sockaddr_in6 a, b;
3960 
3961 	/* save copies */
3962 	a = *addr1;
3963 	b = *addr2;
3964 
3965 	if (a.sin6_scope_id == 0)
3966 		if (sa6_recoverscope(&a)) {
3967 			/* can't get scope, so can't match */
3968 			return (0);
3969 		}
3970 	if (b.sin6_scope_id == 0)
3971 		if (sa6_recoverscope(&b)) {
3972 			/* can't get scope, so can't match */
3973 			return (0);
3974 		}
3975 	if (a.sin6_scope_id != b.sin6_scope_id)
3976 		return (0);
3977 
3978 	return (1);
3979 }
3980 
3981 /*
3982  * returns a sockaddr_in6 with embedded scope recovered and removed
3983  */
3984 struct sockaddr_in6 *
3985 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
3986 {
3987 	/* check and strip embedded scope junk */
3988 	if (addr->sin6_family == AF_INET6) {
3989 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
3990 			if (addr->sin6_scope_id == 0) {
3991 				*store = *addr;
3992 				if (!sa6_recoverscope(store)) {
3993 					/* use the recovered scope */
3994 					addr = store;
3995 				}
3996 			} else {
3997 				/* else, return the original "to" addr */
3998 				in6_clearscope(&addr->sin6_addr);
3999 			}
4000 		}
4001 	}
4002 	return (addr);
4003 }
4004 
4005 #endif
4006 
4007 /*
4008  * are the two addresses the same?  currently a "scopeless" check returns: 1
4009  * if same, 0 if not
4010  */
4011 int
4012 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4013 {
4014 
4015 	/* must be valid */
4016 	if (sa1 == NULL || sa2 == NULL)
4017 		return (0);
4018 
4019 	/* must be the same family */
4020 	if (sa1->sa_family != sa2->sa_family)
4021 		return (0);
4022 
4023 	switch (sa1->sa_family) {
4024 #ifdef INET6
4025 	case AF_INET6:
4026 		{
4027 			/* IPv6 addresses */
4028 			struct sockaddr_in6 *sin6_1, *sin6_2;
4029 
4030 			sin6_1 = (struct sockaddr_in6 *)sa1;
4031 			sin6_2 = (struct sockaddr_in6 *)sa2;
4032 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4033 			    sin6_2));
4034 		}
4035 #endif
4036 #ifdef INET
4037 	case AF_INET:
4038 		{
4039 			/* IPv4 addresses */
4040 			struct sockaddr_in *sin_1, *sin_2;
4041 
4042 			sin_1 = (struct sockaddr_in *)sa1;
4043 			sin_2 = (struct sockaddr_in *)sa2;
4044 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4045 		}
4046 #endif
4047 	default:
4048 		/* we don't do these... */
4049 		return (0);
4050 	}
4051 }
4052 
4053 void
4054 sctp_print_address(struct sockaddr *sa)
4055 {
4056 #ifdef INET6
4057 	char ip6buf[INET6_ADDRSTRLEN];
4058 
4059 	ip6buf[0] = 0;
4060 #endif
4061 
4062 	switch (sa->sa_family) {
4063 #ifdef INET6
4064 	case AF_INET6:
4065 		{
4066 			struct sockaddr_in6 *sin6;
4067 
4068 			sin6 = (struct sockaddr_in6 *)sa;
4069 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4070 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4071 			    ntohs(sin6->sin6_port),
4072 			    sin6->sin6_scope_id);
4073 			break;
4074 		}
4075 #endif
4076 #ifdef INET
4077 	case AF_INET:
4078 		{
4079 			struct sockaddr_in *sin;
4080 			unsigned char *p;
4081 
4082 			sin = (struct sockaddr_in *)sa;
4083 			p = (unsigned char *)&sin->sin_addr;
4084 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4085 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4086 			break;
4087 		}
4088 #endif
4089 	default:
4090 		SCTP_PRINTF("?\n");
4091 		break;
4092 	}
4093 }
4094 
4095 void
4096 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4097 {
4098 	switch (iph->ip_v) {
4099 #ifdef INET
4100 	case IPVERSION:
4101 		{
4102 			struct sockaddr_in lsa, fsa;
4103 
4104 			bzero(&lsa, sizeof(lsa));
4105 			lsa.sin_len = sizeof(lsa);
4106 			lsa.sin_family = AF_INET;
4107 			lsa.sin_addr = iph->ip_src;
4108 			lsa.sin_port = sh->src_port;
4109 			bzero(&fsa, sizeof(fsa));
4110 			fsa.sin_len = sizeof(fsa);
4111 			fsa.sin_family = AF_INET;
4112 			fsa.sin_addr = iph->ip_dst;
4113 			fsa.sin_port = sh->dest_port;
4114 			SCTP_PRINTF("src: ");
4115 			sctp_print_address((struct sockaddr *)&lsa);
4116 			SCTP_PRINTF("dest: ");
4117 			sctp_print_address((struct sockaddr *)&fsa);
4118 			break;
4119 		}
4120 #endif
4121 #ifdef INET6
4122 	case IPV6_VERSION >> 4:
4123 		{
4124 			struct ip6_hdr *ip6;
4125 			struct sockaddr_in6 lsa6, fsa6;
4126 
4127 			ip6 = (struct ip6_hdr *)iph;
4128 			bzero(&lsa6, sizeof(lsa6));
4129 			lsa6.sin6_len = sizeof(lsa6);
4130 			lsa6.sin6_family = AF_INET6;
4131 			lsa6.sin6_addr = ip6->ip6_src;
4132 			lsa6.sin6_port = sh->src_port;
4133 			bzero(&fsa6, sizeof(fsa6));
4134 			fsa6.sin6_len = sizeof(fsa6);
4135 			fsa6.sin6_family = AF_INET6;
4136 			fsa6.sin6_addr = ip6->ip6_dst;
4137 			fsa6.sin6_port = sh->dest_port;
4138 			SCTP_PRINTF("src: ");
4139 			sctp_print_address((struct sockaddr *)&lsa6);
4140 			SCTP_PRINTF("dest: ");
4141 			sctp_print_address((struct sockaddr *)&fsa6);
4142 			break;
4143 		}
4144 #endif
4145 	default:
4146 		/* TSNH */
4147 		break;
4148 	}
4149 }
4150 
/*
 * Move every queued-to-read control structure belonging to stcb from
 * old_inp's read queue to new_inp's (used when an association is
 * peeled off / accepted onto its own socket).  The socket-buffer byte
 * accounting is debited from the old socket and credited to the new
 * one.  waitflags is passed through to sblock().
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* hold the old receive buffer so no reader races with the move */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit the old socket's buffer for each data mbuf */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit the new socket's buffer for each data mbuf */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4226 
/*
 * Append a queued-to-read control structure (and its mbuf chain) to
 * the endpoint's read queue and charge the data to the socket buffer
 * sb, then wake any reader.  Zero-length mbufs are freed on the way.
 * `end` marks the message as complete; `inp_read_lock_held` tells us
 * whether the caller already holds the INP read lock; `so_locked` is
 * only meaningful on platforms that take the socket lock here.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* no reader will ever come; discard the data and control */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* only non-notification data counts as a "receive" */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* walk the chain: prune empty mbufs, charge the rest to sb */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* notify the reader: zero-copy event or plain socket wakeup */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Take a reference and drop the TCB lock
				 * before acquiring the socket lock, then
				 * re-take the TCB lock and drop the ref.
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* socket vanished while unlocked */
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4352 
4353 
/*
 * Append mbuf chain m to an existing queued-to-read control (partial
 * delivery API or reassembly-queue append).  Returns 0 on success,
 * -1 when there is nothing to append to (control NULL, already
 * complete, or m NULL).  `end` marks the message complete,
 * `ctls_cumack` is recorded as the pd-api highest TSN, and `sb` (may
 * be NULL) is the socket buffer to charge.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* no reader will ever come; pretend success */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* walk the new chain: prune empty mbufs, charge the rest to sb */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* notify the reader: zero-copy event or plain socket wakeup */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Take a reference and drop the TCB lock before
			 * acquiring the socket lock, then re-lock and
			 * drop the reference.
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4503 
4504 
4505 
4506 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4507  *************ALTERNATE ROUTING CODE
4508  */
4509 
4510 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4511  *************ALTERNATE ROUTING CODE
4512  */
4513 
4514 struct mbuf *
4515 sctp_generate_invmanparam(int err)
4516 {
4517 	/* Return a MBUF with a invalid mandatory parameter */
4518 	struct mbuf *m;
4519 
4520 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4521 	if (m) {
4522 		struct sctp_paramhdr *ph;
4523 
4524 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4525 		ph = mtod(m, struct sctp_paramhdr *);
4526 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4527 		ph->param_type = htons(err);
4528 	}
4529 	return (m);
4530 }
4531 
4532 #ifdef SCTP_MBCNT_LOGGING
/*
 * Account for releasing chk_cnt chunks worth of buffered output
 * (tp1->book_size bytes) from the association's output queue, logging
 * the change when MBCNT logging is on.  For TCP-model sockets the
 * socket send buffer byte count is debited as well.  No-op when the
 * chunk carries no data.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		/* nothing was booked for this chunk */
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp rather than underflow the queue-size counter */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* TCP-model sockets also charge the socket send buffer */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			/* clamp rather than underflow sb_cc */
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4564 
4565 #endif
4566 
4567 int
4568 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4569     int reason, int so_locked
4570 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4571     SCTP_UNUSED
4572 #endif
4573 )
4574 {
4575 	struct sctp_stream_out *strq;
4576 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4577 	struct sctp_stream_queue_pending *sp;
4578 	uint16_t stream = 0, seq = 0;
4579 	uint8_t foundeom = 0;
4580 	int ret_sz = 0;
4581 	int notdone;
4582 	int do_wakeup_routine = 0;
4583 
4584 	stream = tp1->rec.data.stream_number;
4585 	seq = tp1->rec.data.stream_seq;
4586 	do {
4587 		ret_sz += tp1->book_size;
4588 		if (tp1->data != NULL) {
4589 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4590 				sctp_flight_size_decrease(tp1);
4591 				sctp_total_flight_decrease(stcb, tp1);
4592 			}
4593 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4594 			stcb->asoc.peers_rwnd += tp1->send_size;
4595 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4596 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4597 			if (tp1->data) {
4598 				sctp_m_freem(tp1->data);
4599 				tp1->data = NULL;
4600 			}
4601 			do_wakeup_routine = 1;
4602 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4603 				stcb->asoc.sent_queue_cnt_removeable--;
4604 			}
4605 		}
4606 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4607 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4608 		    SCTP_DATA_NOT_FRAG) {
4609 			/* not frag'ed we ae done   */
4610 			notdone = 0;
4611 			foundeom = 1;
4612 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4613 			/* end of frag, we are done */
4614 			notdone = 0;
4615 			foundeom = 1;
4616 		} else {
4617 			/*
4618 			 * Its a begin or middle piece, we must mark all of
4619 			 * it
4620 			 */
4621 			notdone = 1;
4622 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4623 		}
4624 	} while (tp1 && notdone);
4625 	if (foundeom == 0) {
4626 		/*
4627 		 * The multi-part message was scattered across the send and
4628 		 * sent queue.
4629 		 */
4630 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4631 			if ((tp1->rec.data.stream_number != stream) ||
4632 			    (tp1->rec.data.stream_seq != seq)) {
4633 				break;
4634 			}
4635 			/*
4636 			 * save to chk in case we have some on stream out
4637 			 * queue. If so and we have an un-transmitted one we
4638 			 * don't have to fudge the TSN.
4639 			 */
4640 			chk = tp1;
4641 			ret_sz += tp1->book_size;
4642 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4643 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4644 			if (tp1->data) {
4645 				sctp_m_freem(tp1->data);
4646 				tp1->data = NULL;
4647 			}
4648 			/* No flight involved here book the size to 0 */
4649 			tp1->book_size = 0;
4650 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4651 				foundeom = 1;
4652 			}
4653 			do_wakeup_routine = 1;
4654 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4655 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4656 			/*
4657 			 * on to the sent queue so we can wait for it to be
4658 			 * passed by.
4659 			 */
4660 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4661 			    sctp_next);
4662 			stcb->asoc.send_queue_cnt--;
4663 			stcb->asoc.sent_queue_cnt++;
4664 		}
4665 	}
4666 	if (foundeom == 0) {
4667 		/*
4668 		 * Still no eom found. That means there is stuff left on the
4669 		 * stream out queue.. yuck.
4670 		 */
4671 		strq = &stcb->asoc.strmout[stream];
4672 		SCTP_TCB_SEND_LOCK(stcb);
4673 		TAILQ_FOREACH(sp, &strq->outqueue, next) {
4674 			/* FIXME: Shouldn't this be a serial number check? */
4675 			if (sp->strseq > seq) {
4676 				break;
4677 			}
4678 			/* Check if its our SEQ */
4679 			if (sp->strseq == seq) {
4680 				sp->discard_rest = 1;
4681 				/*
4682 				 * We may need to put a chunk on the queue
4683 				 * that holds the TSN that would have been
4684 				 * sent with the LAST bit.
4685 				 */
4686 				if (chk == NULL) {
4687 					/* Yep, we have to */
4688 					sctp_alloc_a_chunk(stcb, chk);
4689 					if (chk == NULL) {
4690 						/*
4691 						 * we are hosed. All we can
4692 						 * do is nothing.. which
4693 						 * will cause an abort if
4694 						 * the peer is paying
4695 						 * attention.
4696 						 */
4697 						goto oh_well;
4698 					}
4699 					memset(chk, 0, sizeof(*chk));
4700 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4701 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4702 					chk->asoc = &stcb->asoc;
4703 					chk->rec.data.stream_seq = sp->strseq;
4704 					chk->rec.data.stream_number = sp->stream;
4705 					chk->rec.data.payloadtype = sp->ppid;
4706 					chk->rec.data.context = sp->context;
4707 					chk->flags = sp->act_flags;
4708 					if (sp->net)
4709 						chk->whoTo = sp->net;
4710 					else
4711 						chk->whoTo = stcb->asoc.primary_destination;
4712 					atomic_add_int(&chk->whoTo->ref_count, 1);
4713 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4714 					stcb->asoc.pr_sctp_cnt++;
4715 					chk->pr_sctp_on = 1;
4716 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4717 					stcb->asoc.sent_queue_cnt++;
4718 					stcb->asoc.pr_sctp_cnt++;
4719 				} else {
4720 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4721 				}
4722 		oh_well:
4723 				if (sp->data) {
4724 					/*
4725 					 * Pull any data to free up the SB
4726 					 * and allow sender to "add more"
4727 					 * whilc we will throw away :-)
4728 					 */
4729 					sctp_free_spbufspace(stcb, &stcb->asoc,
4730 					    sp);
4731 					ret_sz += sp->length;
4732 					do_wakeup_routine = 1;
4733 					sp->some_taken = 1;
4734 					sctp_m_freem(sp->data);
4735 					sp->length = 0;
4736 					sp->data = NULL;
4737 					sp->tail_mbuf = NULL;
4738 				}
4739 				break;
4740 			}
4741 		}		/* End tailq_foreach */
4742 		SCTP_TCB_SEND_UNLOCK(stcb);
4743 	}
4744 	if (do_wakeup_routine) {
4745 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4746 		struct socket *so;
4747 
4748 		so = SCTP_INP_SO(stcb->sctp_ep);
4749 		if (!so_locked) {
4750 			atomic_add_int(&stcb->asoc.refcnt, 1);
4751 			SCTP_TCB_UNLOCK(stcb);
4752 			SCTP_SOCKET_LOCK(so, 1);
4753 			SCTP_TCB_LOCK(stcb);
4754 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4755 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4756 				/* assoc was freed while we were unlocked */
4757 				SCTP_SOCKET_UNLOCK(so, 1);
4758 				return (ret_sz);
4759 			}
4760 		}
4761 #endif
4762 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4763 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4764 		if (!so_locked) {
4765 			SCTP_SOCKET_UNLOCK(so, 1);
4766 		}
4767 #endif
4768 	}
4769 	return (ret_sz);
4770 }
4771 
4772 /*
4773  * checks to see if the given address, sa, is one that is currently known by
4774  * the kernel note: can't distinguish the same address on multiple interfaces
4775  * and doesn't handle multiple addresses with different zone/scope id's note:
4776  * ifa_ifwithaddr() compares the entire sockaddr struct
4777  */
4778 struct sctp_ifa *
4779 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4780     int holds_lock)
4781 {
4782 	struct sctp_laddr *laddr;
4783 
4784 	if (holds_lock == 0) {
4785 		SCTP_INP_RLOCK(inp);
4786 	}
4787 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4788 		if (laddr->ifa == NULL)
4789 			continue;
4790 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4791 			continue;
4792 #ifdef INET
4793 		if (addr->sa_family == AF_INET) {
4794 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4795 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4796 				/* found him. */
4797 				if (holds_lock == 0) {
4798 					SCTP_INP_RUNLOCK(inp);
4799 				}
4800 				return (laddr->ifa);
4801 				break;
4802 			}
4803 		}
4804 #endif
4805 #ifdef INET6
4806 		if (addr->sa_family == AF_INET6) {
4807 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4808 			    &laddr->ifa->address.sin6)) {
4809 				/* found him. */
4810 				if (holds_lock == 0) {
4811 					SCTP_INP_RUNLOCK(inp);
4812 				}
4813 				return (laddr->ifa);
4814 				break;
4815 			}
4816 		}
4817 #endif
4818 	}
4819 	if (holds_lock == 0) {
4820 		SCTP_INP_RUNLOCK(inp);
4821 	}
4822 	return (NULL);
4823 }
4824 
/*
 * Compute a 32-bit hash for addr, used to select a bucket in the VRF
 * address hash table.  Unsupported families hash to 0.
 *
 * Fix: the IPv6 case label was `case INET6:` — INET6 is the kernel
 * option macro (defined as 1 by opt_inet6.h), not the address family
 * constant, so IPv6 addresses never matched and always hashed to 0.
 * It must be `case AF_INET6:`.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			/* fold the upper half into the lower half */
			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t hash_of_addr;

			sin6 = (struct sockaddr_in6 *)addr;
			/* sum the four 32-bit words, then fold */
			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3]);
			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
			return (hash_of_addr);
		}
#endif
	default:
		break;
	}
	return (0);
}
4858 
4859 struct sctp_ifa *
4860 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4861 {
4862 	struct sctp_ifa *sctp_ifap;
4863 	struct sctp_vrf *vrf;
4864 	struct sctp_ifalist *hash_head;
4865 	uint32_t hash_of_addr;
4866 
4867 	if (holds_lock == 0)
4868 		SCTP_IPI_ADDR_RLOCK();
4869 
4870 	vrf = sctp_find_vrf(vrf_id);
4871 	if (vrf == NULL) {
4872 stage_right:
4873 		if (holds_lock == 0)
4874 			SCTP_IPI_ADDR_RUNLOCK();
4875 		return (NULL);
4876 	}
4877 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4878 
4879 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4880 	if (hash_head == NULL) {
4881 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4882 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4883 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4884 		sctp_print_address(addr);
4885 		SCTP_PRINTF("No such bucket for address\n");
4886 		if (holds_lock == 0)
4887 			SCTP_IPI_ADDR_RUNLOCK();
4888 
4889 		return (NULL);
4890 	}
4891 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4892 		if (sctp_ifap == NULL) {
4893 #ifdef INVARIANTS
4894 			panic("Huh LIST_FOREACH corrupt");
4895 			goto stage_right;
4896 #else
4897 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4898 			goto stage_right;
4899 #endif
4900 		}
4901 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4902 			continue;
4903 #ifdef INET
4904 		if (addr->sa_family == AF_INET) {
4905 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4906 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4907 				/* found him. */
4908 				if (holds_lock == 0)
4909 					SCTP_IPI_ADDR_RUNLOCK();
4910 				return (sctp_ifap);
4911 				break;
4912 			}
4913 		}
4914 #endif
4915 #ifdef INET6
4916 		if (addr->sa_family == AF_INET6) {
4917 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4918 			    &sctp_ifap->address.sin6)) {
4919 				/* found him. */
4920 				if (holds_lock == 0)
4921 					SCTP_IPI_ADDR_RUNLOCK();
4922 				return (sctp_ifap);
4923 				break;
4924 			}
4925 		}
4926 #endif
4927 	}
4928 	if (holds_lock == 0)
4929 		SCTP_IPI_ADDR_RUNLOCK();
4930 	return (NULL);
4931 }
4932 
/*
 * Called after the user has consumed *freed_so_far bytes from the
 * receive path: decide whether the receive window has opened by at
 * least rwnd_req bytes and, if so, send an immediate window-update
 * SACK (dropping the INP read lock around the send when the caller
 * holds it, per hold_rlock).  *freed_so_far is folded into the
 * per-assoc running total and reset to 0.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* hold a reference so the assoc cannot be freed under us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window opened enough; send an update SACK now */
		if (hold_rlock) {
			/* drop the read lock across the send */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* re-take the read lock if we dropped it above */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5014 
5015 int
5016 sctp_sorecvmsg(struct socket *so,
5017     struct uio *uio,
5018     struct mbuf **mp,
5019     struct sockaddr *from,
5020     int fromlen,
5021     int *msg_flags,
5022     struct sctp_sndrcvinfo *sinfo,
5023     int filling_sinfo)
5024 {
5025 	/*
5026 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5027 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5028 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5029 	 * On the way out we may send out any combination of:
5030 	 * MSG_NOTIFICATION MSG_EOR
5031 	 *
5032 	 */
5033 	struct sctp_inpcb *inp = NULL;
5034 	int my_len = 0;
5035 	int cp_len = 0, error = 0;
5036 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5037 	struct mbuf *m = NULL;
5038 	struct sctp_tcb *stcb = NULL;
5039 	int wakeup_read_socket = 0;
5040 	int freecnt_applied = 0;
5041 	int out_flags = 0, in_flags = 0;
5042 	int block_allowed = 1;
5043 	uint32_t freed_so_far = 0;
5044 	uint32_t copied_so_far = 0;
5045 	int in_eeor_mode = 0;
5046 	int no_rcv_needed = 0;
5047 	uint32_t rwnd_req = 0;
5048 	int hold_sblock = 0;
5049 	int hold_rlock = 0;
5050 	int slen = 0;
5051 	uint32_t held_length = 0;
5052 	int sockbuf_lock = 0;
5053 
5054 	if (uio == NULL) {
5055 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5056 		return (EINVAL);
5057 	}
5058 	if (msg_flags) {
5059 		in_flags = *msg_flags;
5060 		if (in_flags & MSG_PEEK)
5061 			SCTP_STAT_INCR(sctps_read_peeks);
5062 	} else {
5063 		in_flags = 0;
5064 	}
5065 	slen = uio->uio_resid;
5066 
5067 	/* Pull in and set up our int flags */
5068 	if (in_flags & MSG_OOB) {
5069 		/* Out of band's NOT supported */
5070 		return (EOPNOTSUPP);
5071 	}
5072 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5073 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5074 		return (EINVAL);
5075 	}
5076 	if ((in_flags & (MSG_DONTWAIT
5077 	    | MSG_NBIO
5078 	    )) ||
5079 	    SCTP_SO_IS_NBIO(so)) {
5080 		block_allowed = 0;
5081 	}
5082 	/* setup the endpoint */
5083 	inp = (struct sctp_inpcb *)so->so_pcb;
5084 	if (inp == NULL) {
5085 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5086 		return (EFAULT);
5087 	}
5088 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5089 	/* Must be at least a MTU's worth */
5090 	if (rwnd_req < SCTP_MIN_RWND)
5091 		rwnd_req = SCTP_MIN_RWND;
5092 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5093 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5094 		sctp_misc_ints(SCTP_SORECV_ENTER,
5095 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5096 	}
5097 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5098 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5099 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5100 	}
5101 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5102 	sockbuf_lock = 1;
5103 	if (error) {
5104 		goto release_unlocked;
5105 	}
5106 restart:
5107 
5108 
5109 restart_nosblocks:
5110 	if (hold_sblock == 0) {
5111 		SOCKBUF_LOCK(&so->so_rcv);
5112 		hold_sblock = 1;
5113 	}
5114 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5115 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5116 		goto out;
5117 	}
5118 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5119 		if (so->so_error) {
5120 			error = so->so_error;
5121 			if ((in_flags & MSG_PEEK) == 0)
5122 				so->so_error = 0;
5123 			goto out;
5124 		} else {
5125 			if (so->so_rcv.sb_cc == 0) {
5126 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5127 				/* indicate EOF */
5128 				error = 0;
5129 				goto out;
5130 			}
5131 		}
5132 	}
5133 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5134 		/* we need to wait for data */
5135 		if ((so->so_rcv.sb_cc == 0) &&
5136 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5137 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5138 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5139 				/*
5140 				 * For active open side clear flags for
5141 				 * re-use passive open is blocked by
5142 				 * connect.
5143 				 */
5144 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5145 					/*
5146 					 * You were aborted, passive side
5147 					 * always hits here
5148 					 */
5149 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5150 					error = ECONNRESET;
5151 				}
5152 				so->so_state &= ~(SS_ISCONNECTING |
5153 				    SS_ISDISCONNECTING |
5154 				    SS_ISCONFIRMING |
5155 				    SS_ISCONNECTED);
5156 				if (error == 0) {
5157 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5158 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5159 						error = ENOTCONN;
5160 					}
5161 				}
5162 				goto out;
5163 			}
5164 		}
5165 		error = sbwait(&so->so_rcv);
5166 		if (error) {
5167 			goto out;
5168 		}
5169 		held_length = 0;
5170 		goto restart_nosblocks;
5171 	} else if (so->so_rcv.sb_cc == 0) {
5172 		if (so->so_error) {
5173 			error = so->so_error;
5174 			if ((in_flags & MSG_PEEK) == 0)
5175 				so->so_error = 0;
5176 		} else {
5177 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5178 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5179 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5180 					/*
5181 					 * For active open side clear flags
5182 					 * for re-use passive open is
5183 					 * blocked by connect.
5184 					 */
5185 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5186 						/*
5187 						 * You were aborted, passive
5188 						 * side always hits here
5189 						 */
5190 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5191 						error = ECONNRESET;
5192 					}
5193 					so->so_state &= ~(SS_ISCONNECTING |
5194 					    SS_ISDISCONNECTING |
5195 					    SS_ISCONFIRMING |
5196 					    SS_ISCONNECTED);
5197 					if (error == 0) {
5198 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5199 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5200 							error = ENOTCONN;
5201 						}
5202 					}
5203 					goto out;
5204 				}
5205 			}
5206 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5207 			error = EWOULDBLOCK;
5208 		}
5209 		goto out;
5210 	}
5211 	if (hold_sblock == 1) {
5212 		SOCKBUF_UNLOCK(&so->so_rcv);
5213 		hold_sblock = 0;
5214 	}
5215 	/* we possibly have data we can read */
5216 	/* sa_ignore FREED_MEMORY */
5217 	control = TAILQ_FIRST(&inp->read_queue);
5218 	if (control == NULL) {
5219 		/*
5220 		 * This could be happening since the appender did the
5221 		 * increment but as not yet did the tailq insert onto the
5222 		 * read_queue
5223 		 */
5224 		if (hold_rlock == 0) {
5225 			SCTP_INP_READ_LOCK(inp);
5226 		}
5227 		control = TAILQ_FIRST(&inp->read_queue);
5228 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5229 #ifdef INVARIANTS
5230 			panic("Huh, its non zero and nothing on control?");
5231 #endif
5232 			so->so_rcv.sb_cc = 0;
5233 		}
5234 		SCTP_INP_READ_UNLOCK(inp);
5235 		hold_rlock = 0;
5236 		goto restart;
5237 	}
5238 	if ((control->length == 0) &&
5239 	    (control->do_not_ref_stcb)) {
5240 		/*
5241 		 * Clean up code for freeing assoc that left behind a
5242 		 * pdapi.. maybe a peer in EEOR that just closed after
5243 		 * sending and never indicated a EOR.
5244 		 */
5245 		if (hold_rlock == 0) {
5246 			hold_rlock = 1;
5247 			SCTP_INP_READ_LOCK(inp);
5248 		}
5249 		control->held_length = 0;
5250 		if (control->data) {
5251 			/* Hmm there is data here .. fix */
5252 			struct mbuf *m_tmp;
5253 			int cnt = 0;
5254 
5255 			m_tmp = control->data;
5256 			while (m_tmp) {
5257 				cnt += SCTP_BUF_LEN(m_tmp);
5258 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5259 					control->tail_mbuf = m_tmp;
5260 					control->end_added = 1;
5261 				}
5262 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5263 			}
5264 			control->length = cnt;
5265 		} else {
5266 			/* remove it */
5267 			TAILQ_REMOVE(&inp->read_queue, control, next);
5268 			/* Add back any hiddend data */
5269 			sctp_free_remote_addr(control->whoFrom);
5270 			sctp_free_a_readq(stcb, control);
5271 		}
5272 		if (hold_rlock) {
5273 			hold_rlock = 0;
5274 			SCTP_INP_READ_UNLOCK(inp);
5275 		}
5276 		goto restart;
5277 	}
5278 	if ((control->length == 0) &&
5279 	    (control->end_added == 1)) {
5280 		/*
5281 		 * Do we also need to check for (control->pdapi_aborted ==
5282 		 * 1)?
5283 		 */
5284 		if (hold_rlock == 0) {
5285 			hold_rlock = 1;
5286 			SCTP_INP_READ_LOCK(inp);
5287 		}
5288 		TAILQ_REMOVE(&inp->read_queue, control, next);
5289 		if (control->data) {
5290 #ifdef INVARIANTS
5291 			panic("control->data not null but control->length == 0");
5292 #else
5293 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5294 			sctp_m_freem(control->data);
5295 			control->data = NULL;
5296 #endif
5297 		}
5298 		if (control->aux_data) {
5299 			sctp_m_free(control->aux_data);
5300 			control->aux_data = NULL;
5301 		}
5302 		sctp_free_remote_addr(control->whoFrom);
5303 		sctp_free_a_readq(stcb, control);
5304 		if (hold_rlock) {
5305 			hold_rlock = 0;
5306 			SCTP_INP_READ_UNLOCK(inp);
5307 		}
5308 		goto restart;
5309 	}
5310 	if (control->length == 0) {
5311 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5312 		    (filling_sinfo)) {
5313 			/* find a more suitable one then this */
5314 			ctl = TAILQ_NEXT(control, next);
5315 			while (ctl) {
5316 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5317 				    (ctl->some_taken ||
5318 				    (ctl->spec_flags & M_NOTIFICATION) ||
5319 				    ((ctl->do_not_ref_stcb == 0) &&
5320 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5321 				    ) {
5322 					/*-
5323 					 * If we have a different TCB next, and there is data
5324 					 * present. If we have already taken some (pdapi), OR we can
5325 					 * ref the tcb and no delivery as started on this stream, we
5326 					 * take it. Note we allow a notification on a different
5327 					 * assoc to be delivered..
5328 					 */
5329 					control = ctl;
5330 					goto found_one;
5331 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5332 					    (ctl->length) &&
5333 					    ((ctl->some_taken) ||
5334 					    ((ctl->do_not_ref_stcb == 0) &&
5335 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5336 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5337 					/*-
5338 					 * If we have the same tcb, and there is data present, and we
5339 					 * have the strm interleave feature present. Then if we have
5340 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5341 					 * not started a delivery for this stream, we can take it.
5342 					 * Note we do NOT allow a notificaiton on the same assoc to
5343 					 * be delivered.
5344 					 */
5345 					control = ctl;
5346 					goto found_one;
5347 				}
5348 				ctl = TAILQ_NEXT(ctl, next);
5349 			}
5350 		}
5351 		/*
5352 		 * if we reach here, not suitable replacement is available
5353 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5354 		 * into the our held count, and its time to sleep again.
5355 		 */
5356 		held_length = so->so_rcv.sb_cc;
5357 		control->held_length = so->so_rcv.sb_cc;
5358 		goto restart;
5359 	}
5360 	/* Clear the held length since there is something to read */
5361 	control->held_length = 0;
5362 	if (hold_rlock) {
5363 		SCTP_INP_READ_UNLOCK(inp);
5364 		hold_rlock = 0;
5365 	}
5366 found_one:
5367 	/*
5368 	 * If we reach here, control has a some data for us to read off.
5369 	 * Note that stcb COULD be NULL.
5370 	 */
5371 	control->some_taken++;
5372 	if (hold_sblock) {
5373 		SOCKBUF_UNLOCK(&so->so_rcv);
5374 		hold_sblock = 0;
5375 	}
5376 	stcb = control->stcb;
5377 	if (stcb) {
5378 		if ((control->do_not_ref_stcb == 0) &&
5379 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5380 			if (freecnt_applied == 0)
5381 				stcb = NULL;
5382 		} else if (control->do_not_ref_stcb == 0) {
5383 			/* you can't free it on me please */
5384 			/*
5385 			 * The lock on the socket buffer protects us so the
5386 			 * free code will stop. But since we used the
5387 			 * socketbuf lock and the sender uses the tcb_lock
5388 			 * to increment, we need to use the atomic add to
5389 			 * the refcnt
5390 			 */
5391 			if (freecnt_applied) {
5392 #ifdef INVARIANTS
5393 				panic("refcnt already incremented");
5394 #else
5395 				printf("refcnt already incremented?\n");
5396 #endif
5397 			} else {
5398 				atomic_add_int(&stcb->asoc.refcnt, 1);
5399 				freecnt_applied = 1;
5400 			}
5401 			/*
5402 			 * Setup to remember how much we have not yet told
5403 			 * the peer our rwnd has opened up. Note we grab the
5404 			 * value from the tcb from last time. Note too that
5405 			 * sack sending clears this when a sack is sent,
5406 			 * which is fine. Once we hit the rwnd_req, we then
5407 			 * will go to the sctp_user_rcvd() that will not
5408 			 * lock until it KNOWs it MUST send a WUP-SACK.
5409 			 */
5410 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5411 			stcb->freed_by_sorcv_sincelast = 0;
5412 		}
5413 	}
5414 	if (stcb &&
5415 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5416 	    control->do_not_ref_stcb == 0) {
5417 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5418 	}
5419 	/* First lets get off the sinfo and sockaddr info */
5420 	if ((sinfo) && filling_sinfo) {
5421 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5422 		nxt = TAILQ_NEXT(control, next);
5423 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5424 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5425 			struct sctp_extrcvinfo *s_extra;
5426 
5427 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5428 			if ((nxt) &&
5429 			    (nxt->length)) {
5430 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5431 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5432 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5433 				}
5434 				if (nxt->spec_flags & M_NOTIFICATION) {
5435 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5436 				}
5437 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5438 				s_extra->sreinfo_next_length = nxt->length;
5439 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5440 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5441 				if (nxt->tail_mbuf != NULL) {
5442 					if (nxt->end_added) {
5443 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5444 					}
5445 				}
5446 			} else {
5447 				/*
5448 				 * we explicitly 0 this, since the memcpy
5449 				 * got some other things beyond the older
5450 				 * sinfo_ that is on the control's structure
5451 				 * :-D
5452 				 */
5453 				nxt = NULL;
5454 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5455 				s_extra->sreinfo_next_aid = 0;
5456 				s_extra->sreinfo_next_length = 0;
5457 				s_extra->sreinfo_next_ppid = 0;
5458 				s_extra->sreinfo_next_stream = 0;
5459 			}
5460 		}
5461 		/*
5462 		 * update off the real current cum-ack, if we have an stcb.
5463 		 */
5464 		if ((control->do_not_ref_stcb == 0) && stcb)
5465 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5466 		/*
5467 		 * mask off the high bits, we keep the actual chunk bits in
5468 		 * there.
5469 		 */
5470 		sinfo->sinfo_flags &= 0x00ff;
5471 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5472 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5473 		}
5474 	}
5475 #ifdef SCTP_ASOCLOG_OF_TSNS
5476 	{
5477 		int index, newindex;
5478 		struct sctp_pcbtsn_rlog *entry;
5479 
5480 		do {
5481 			index = inp->readlog_index;
5482 			newindex = index + 1;
5483 			if (newindex >= SCTP_READ_LOG_SIZE) {
5484 				newindex = 0;
5485 			}
5486 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5487 		entry = &inp->readlog[index];
5488 		entry->vtag = control->sinfo_assoc_id;
5489 		entry->strm = control->sinfo_stream;
5490 		entry->seq = control->sinfo_ssn;
5491 		entry->sz = control->length;
5492 		entry->flgs = control->sinfo_flags;
5493 	}
5494 #endif
5495 	if (fromlen && from) {
5496 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
5497 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5498 #ifdef INET6
5499 		case AF_INET6:
5500 			((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5501 			break;
5502 #endif
5503 #ifdef INET
5504 		case AF_INET:
5505 			((struct sockaddr_in *)from)->sin_port = control->port_from;
5506 			break;
5507 #endif
5508 		default:
5509 			break;
5510 		}
5511 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5512 
5513 #if defined(INET) && defined(INET6)
5514 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5515 		    (from->sa_family == AF_INET) &&
5516 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5517 			struct sockaddr_in *sin;
5518 			struct sockaddr_in6 sin6;
5519 
5520 			sin = (struct sockaddr_in *)from;
5521 			bzero(&sin6, sizeof(sin6));
5522 			sin6.sin6_family = AF_INET6;
5523 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5524 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5525 			bcopy(&sin->sin_addr,
5526 			    &sin6.sin6_addr.s6_addr32[3],
5527 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5528 			sin6.sin6_port = sin->sin_port;
5529 			memcpy(from, &sin6, sizeof(struct sockaddr_in6));
5530 		}
5531 #endif
5532 #if defined(INET6)
5533 		{
5534 			struct sockaddr_in6 lsa6, *from6;
5535 
5536 			from6 = (struct sockaddr_in6 *)from;
5537 			sctp_recover_scope_mac(from6, (&lsa6));
5538 		}
5539 #endif
5540 	}
5541 	/* now copy out what data we can */
5542 	if (mp == NULL) {
5543 		/* copy out each mbuf in the chain up to length */
5544 get_more_data:
5545 		m = control->data;
5546 		while (m) {
5547 			/* Move out all we can */
5548 			cp_len = (int)uio->uio_resid;
5549 			my_len = (int)SCTP_BUF_LEN(m);
5550 			if (cp_len > my_len) {
5551 				/* not enough in this buf */
5552 				cp_len = my_len;
5553 			}
5554 			if (hold_rlock) {
5555 				SCTP_INP_READ_UNLOCK(inp);
5556 				hold_rlock = 0;
5557 			}
5558 			if (cp_len > 0)
5559 				error = uiomove(mtod(m, char *), cp_len, uio);
5560 			/* re-read */
5561 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5562 				goto release;
5563 			}
5564 			if ((control->do_not_ref_stcb == 0) && stcb &&
5565 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5566 				no_rcv_needed = 1;
5567 			}
5568 			if (error) {
5569 				/* error we are out of here */
5570 				goto release;
5571 			}
5572 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5573 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5574 			    ((control->end_added == 0) ||
5575 			    (control->end_added &&
5576 			    (TAILQ_NEXT(control, next) == NULL)))
5577 			    ) {
5578 				SCTP_INP_READ_LOCK(inp);
5579 				hold_rlock = 1;
5580 			}
5581 			if (cp_len == SCTP_BUF_LEN(m)) {
5582 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5583 				    (control->end_added)) {
5584 					out_flags |= MSG_EOR;
5585 					if ((control->do_not_ref_stcb == 0) &&
5586 					    (control->stcb != NULL) &&
5587 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5588 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5589 				}
5590 				if (control->spec_flags & M_NOTIFICATION) {
5591 					out_flags |= MSG_NOTIFICATION;
5592 				}
5593 				/* we ate up the mbuf */
5594 				if (in_flags & MSG_PEEK) {
5595 					/* just looking */
5596 					m = SCTP_BUF_NEXT(m);
5597 					copied_so_far += cp_len;
5598 				} else {
5599 					/* dispose of the mbuf */
5600 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5601 						sctp_sblog(&so->so_rcv,
5602 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5603 					}
5604 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5605 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5606 						sctp_sblog(&so->so_rcv,
5607 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5608 					}
5609 					copied_so_far += cp_len;
5610 					freed_so_far += cp_len;
5611 					freed_so_far += MSIZE;
5612 					atomic_subtract_int(&control->length, cp_len);
5613 					control->data = sctp_m_free(m);
5614 					m = control->data;
5615 					/*
5616 					 * been through it all, must hold sb
5617 					 * lock ok to null tail
5618 					 */
5619 					if (control->data == NULL) {
5620 #ifdef INVARIANTS
5621 						if ((control->end_added == 0) ||
5622 						    (TAILQ_NEXT(control, next) == NULL)) {
5623 							/*
5624 							 * If the end is not
5625 							 * added, OR the
5626 							 * next is NOT null
5627 							 * we MUST have the
5628 							 * lock.
5629 							 */
5630 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5631 								panic("Hmm we don't own the lock?");
5632 							}
5633 						}
5634 #endif
5635 						control->tail_mbuf = NULL;
5636 #ifdef INVARIANTS
5637 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5638 							panic("end_added, nothing left and no MSG_EOR");
5639 						}
5640 #endif
5641 					}
5642 				}
5643 			} else {
5644 				/* Do we need to trim the mbuf? */
5645 				if (control->spec_flags & M_NOTIFICATION) {
5646 					out_flags |= MSG_NOTIFICATION;
5647 				}
5648 				if ((in_flags & MSG_PEEK) == 0) {
5649 					SCTP_BUF_RESV_UF(m, cp_len);
5650 					SCTP_BUF_LEN(m) -= cp_len;
5651 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5652 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5653 					}
5654 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5655 					if ((control->do_not_ref_stcb == 0) &&
5656 					    stcb) {
5657 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5658 					}
5659 					copied_so_far += cp_len;
5660 					freed_so_far += cp_len;
5661 					freed_so_far += MSIZE;
5662 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5663 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5664 						    SCTP_LOG_SBRESULT, 0);
5665 					}
5666 					atomic_subtract_int(&control->length, cp_len);
5667 				} else {
5668 					copied_so_far += cp_len;
5669 				}
5670 			}
5671 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5672 				break;
5673 			}
5674 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5675 			    (control->do_not_ref_stcb == 0) &&
5676 			    (freed_so_far >= rwnd_req)) {
5677 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5678 			}
5679 		}		/* end while(m) */
5680 		/*
5681 		 * At this point we have looked at it all and we either have
5682 		 * a MSG_EOR/or read all the user wants... <OR>
5683 		 * control->length == 0.
5684 		 */
5685 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5686 			/* we are done with this control */
5687 			if (control->length == 0) {
5688 				if (control->data) {
5689 #ifdef INVARIANTS
5690 					panic("control->data not null at read eor?");
5691 #else
5692 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5693 					sctp_m_freem(control->data);
5694 					control->data = NULL;
5695 #endif
5696 				}
5697 		done_with_control:
5698 				if (TAILQ_NEXT(control, next) == NULL) {
5699 					/*
5700 					 * If we don't have a next we need a
5701 					 * lock, if there is a next
5702 					 * interrupt is filling ahead of us
5703 					 * and we don't need a lock to
5704 					 * remove this guy (which is the
5705 					 * head of the queue).
5706 					 */
5707 					if (hold_rlock == 0) {
5708 						SCTP_INP_READ_LOCK(inp);
5709 						hold_rlock = 1;
5710 					}
5711 				}
5712 				TAILQ_REMOVE(&inp->read_queue, control, next);
5713 				/* Add back any hiddend data */
5714 				if (control->held_length) {
5715 					held_length = 0;
5716 					control->held_length = 0;
5717 					wakeup_read_socket = 1;
5718 				}
5719 				if (control->aux_data) {
5720 					sctp_m_free(control->aux_data);
5721 					control->aux_data = NULL;
5722 				}
5723 				no_rcv_needed = control->do_not_ref_stcb;
5724 				sctp_free_remote_addr(control->whoFrom);
5725 				control->data = NULL;
5726 				sctp_free_a_readq(stcb, control);
5727 				control = NULL;
5728 				if ((freed_so_far >= rwnd_req) &&
5729 				    (no_rcv_needed == 0))
5730 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5731 
5732 			} else {
5733 				/*
5734 				 * The user did not read all of this
5735 				 * message, turn off the returned MSG_EOR
5736 				 * since we are leaving more behind on the
5737 				 * control to read.
5738 				 */
5739 #ifdef INVARIANTS
5740 				if (control->end_added &&
5741 				    (control->data == NULL) &&
5742 				    (control->tail_mbuf == NULL)) {
5743 					panic("Gak, control->length is corrupt?");
5744 				}
5745 #endif
5746 				no_rcv_needed = control->do_not_ref_stcb;
5747 				out_flags &= ~MSG_EOR;
5748 			}
5749 		}
5750 		if (out_flags & MSG_EOR) {
5751 			goto release;
5752 		}
5753 		if ((uio->uio_resid == 0) ||
5754 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5755 		    ) {
5756 			goto release;
5757 		}
5758 		/*
5759 		 * If I hit here the receiver wants more and this message is
5760 		 * NOT done (pd-api). So two questions. Can we block? if not
5761 		 * we are done. Did the user NOT set MSG_WAITALL?
5762 		 */
5763 		if (block_allowed == 0) {
5764 			goto release;
5765 		}
5766 		/*
5767 		 * We need to wait for more data a few things: - We don't
5768 		 * sbunlock() so we don't get someone else reading. - We
5769 		 * must be sure to account for the case where what is added
5770 		 * is NOT to our control when we wakeup.
5771 		 */
5772 
5773 		/*
5774 		 * Do we need to tell the transport a rwnd update might be
5775 		 * needed before we go to sleep?
5776 		 */
5777 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5778 		    ((freed_so_far >= rwnd_req) &&
5779 		    (control->do_not_ref_stcb == 0) &&
5780 		    (no_rcv_needed == 0))) {
5781 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5782 		}
5783 wait_some_more:
5784 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5785 			goto release;
5786 		}
5787 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5788 			goto release;
5789 
5790 		if (hold_rlock == 1) {
5791 			SCTP_INP_READ_UNLOCK(inp);
5792 			hold_rlock = 0;
5793 		}
5794 		if (hold_sblock == 0) {
5795 			SOCKBUF_LOCK(&so->so_rcv);
5796 			hold_sblock = 1;
5797 		}
5798 		if ((copied_so_far) && (control->length == 0) &&
5799 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5800 			goto release;
5801 		}
5802 		if (so->so_rcv.sb_cc <= control->held_length) {
5803 			error = sbwait(&so->so_rcv);
5804 			if (error) {
5805 				goto release;
5806 			}
5807 			control->held_length = 0;
5808 		}
5809 		if (hold_sblock) {
5810 			SOCKBUF_UNLOCK(&so->so_rcv);
5811 			hold_sblock = 0;
5812 		}
5813 		if (control->length == 0) {
5814 			/* still nothing here */
5815 			if (control->end_added == 1) {
5816 				/* he aborted, or is done i.e.did a shutdown */
5817 				out_flags |= MSG_EOR;
5818 				if (control->pdapi_aborted) {
5819 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5820 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5821 
5822 					out_flags |= MSG_TRUNC;
5823 				} else {
5824 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5825 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5826 				}
5827 				goto done_with_control;
5828 			}
5829 			if (so->so_rcv.sb_cc > held_length) {
5830 				control->held_length = so->so_rcv.sb_cc;
5831 				held_length = 0;
5832 			}
5833 			goto wait_some_more;
5834 		} else if (control->data == NULL) {
5835 			/*
5836 			 * we must re-sync since data is probably being
5837 			 * added
5838 			 */
5839 			SCTP_INP_READ_LOCK(inp);
5840 			if ((control->length > 0) && (control->data == NULL)) {
5841 				/*
5842 				 * big trouble.. we have the lock and its
5843 				 * corrupt?
5844 				 */
5845 #ifdef INVARIANTS
5846 				panic("Impossible data==NULL length !=0");
5847 #endif
5848 				out_flags |= MSG_EOR;
5849 				out_flags |= MSG_TRUNC;
5850 				control->length = 0;
5851 				SCTP_INP_READ_UNLOCK(inp);
5852 				goto done_with_control;
5853 			}
5854 			SCTP_INP_READ_UNLOCK(inp);
5855 			/* We will fall around to get more data */
5856 		}
5857 		goto get_more_data;
5858 	} else {
5859 		/*-
5860 		 * Give caller back the mbuf chain,
5861 		 * store in uio_resid the length
5862 		 */
5863 		wakeup_read_socket = 0;
5864 		if ((control->end_added == 0) ||
5865 		    (TAILQ_NEXT(control, next) == NULL)) {
5866 			/* Need to get rlock */
5867 			if (hold_rlock == 0) {
5868 				SCTP_INP_READ_LOCK(inp);
5869 				hold_rlock = 1;
5870 			}
5871 		}
5872 		if (control->end_added) {
5873 			out_flags |= MSG_EOR;
5874 			if ((control->do_not_ref_stcb == 0) &&
5875 			    (control->stcb != NULL) &&
5876 			    ((control->spec_flags & M_NOTIFICATION) == 0))
5877 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5878 		}
5879 		if (control->spec_flags & M_NOTIFICATION) {
5880 			out_flags |= MSG_NOTIFICATION;
5881 		}
5882 		uio->uio_resid = control->length;
5883 		*mp = control->data;
5884 		m = control->data;
5885 		while (m) {
5886 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5887 				sctp_sblog(&so->so_rcv,
5888 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5889 			}
5890 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5891 			freed_so_far += SCTP_BUF_LEN(m);
5892 			freed_so_far += MSIZE;
5893 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5894 				sctp_sblog(&so->so_rcv,
5895 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5896 			}
5897 			m = SCTP_BUF_NEXT(m);
5898 		}
5899 		control->data = control->tail_mbuf = NULL;
5900 		control->length = 0;
5901 		if (out_flags & MSG_EOR) {
5902 			/* Done with this control */
5903 			goto done_with_control;
5904 		}
5905 	}
5906 release:
5907 	if (hold_rlock == 1) {
5908 		SCTP_INP_READ_UNLOCK(inp);
5909 		hold_rlock = 0;
5910 	}
5911 	if (hold_sblock == 1) {
5912 		SOCKBUF_UNLOCK(&so->so_rcv);
5913 		hold_sblock = 0;
5914 	}
5915 	sbunlock(&so->so_rcv);
5916 	sockbuf_lock = 0;
5917 
5918 release_unlocked:
5919 	if (hold_sblock) {
5920 		SOCKBUF_UNLOCK(&so->so_rcv);
5921 		hold_sblock = 0;
5922 	}
5923 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5924 		if ((freed_so_far >= rwnd_req) &&
5925 		    (control && (control->do_not_ref_stcb == 0)) &&
5926 		    (no_rcv_needed == 0))
5927 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5928 	}
5929 out:
5930 	if (msg_flags) {
5931 		*msg_flags = out_flags;
5932 	}
5933 	if (((out_flags & MSG_EOR) == 0) &&
5934 	    ((in_flags & MSG_PEEK) == 0) &&
5935 	    (sinfo) &&
5936 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5937 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
5938 		struct sctp_extrcvinfo *s_extra;
5939 
5940 		s_extra = (struct sctp_extrcvinfo *)sinfo;
5941 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5942 	}
5943 	if (hold_rlock == 1) {
5944 		SCTP_INP_READ_UNLOCK(inp);
5945 	}
5946 	if (hold_sblock) {
5947 		SOCKBUF_UNLOCK(&so->so_rcv);
5948 	}
5949 	if (sockbuf_lock) {
5950 		sbunlock(&so->so_rcv);
5951 	}
5952 	if (freecnt_applied) {
5953 		/*
5954 		 * The lock on the socket buffer protects us so the free
5955 		 * code will stop. But since we used the socketbuf lock and
5956 		 * the sender uses the tcb_lock to increment, we need to use
5957 		 * the atomic add to the refcnt.
5958 		 */
5959 		if (stcb == NULL) {
5960 #ifdef INVARIANTS
5961 			panic("stcb for refcnt has gone NULL?");
5962 			goto stage_left;
5963 #else
5964 			goto stage_left;
5965 #endif
5966 		}
5967 		atomic_add_int(&stcb->asoc.refcnt, -1);
5968 		/* Save the value back for next time */
5969 		stcb->freed_by_sorcv_sincelast = freed_so_far;
5970 	}
5971 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5972 		if (stcb) {
5973 			sctp_misc_ints(SCTP_SORECV_DONE,
5974 			    freed_so_far,
5975 			    ((uio) ? (slen - uio->uio_resid) : slen),
5976 			    stcb->asoc.my_rwnd,
5977 			    so->so_rcv.sb_cc);
5978 		} else {
5979 			sctp_misc_ints(SCTP_SORECV_DONE,
5980 			    freed_so_far,
5981 			    ((uio) ? (slen - uio->uio_resid) : slen),
5982 			    0,
5983 			    so->so_rcv.sb_cc);
5984 		}
5985 	}
5986 stage_left:
5987 	if (wakeup_read_socket) {
5988 		sctp_sorwakeup(inp, so);
5989 	}
5990 	return (error);
5991 }
5992 
5993 
5994 #ifdef SCTP_MBUF_LOGGING
5995 struct mbuf *
5996 sctp_m_free(struct mbuf *m)
5997 {
5998 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5999 		if (SCTP_BUF_IS_EXTENDED(m)) {
6000 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6001 		}
6002 	}
6003 	return (m_free(m));
6004 }
6005 
6006 void
6007 sctp_m_freem(struct mbuf *mb)
6008 {
6009 	while (mb != NULL)
6010 		mb = sctp_m_free(mb);
6011 }
6012 
6013 #endif
6014 
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 *
	 * Returns 0 on success, EADDRNOTAVAIL if the address is not a
	 * local address on the given VRF, or ENOMEM if the work-queue
	 * entry cannot be allocated.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	/* The address must be known locally on this VRF. */
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa while it sits on the work queue. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	/* Kick the address work-queue timer so the request gets serviced. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6061 
6062 
6063 int
6064 sctp_soreceive(struct socket *so,
6065     struct sockaddr **psa,
6066     struct uio *uio,
6067     struct mbuf **mp0,
6068     struct mbuf **controlp,
6069     int *flagsp)
6070 {
6071 	int error, fromlen;
6072 	uint8_t sockbuf[256];
6073 	struct sockaddr *from;
6074 	struct sctp_extrcvinfo sinfo;
6075 	int filling_sinfo = 1;
6076 	struct sctp_inpcb *inp;
6077 
6078 	inp = (struct sctp_inpcb *)so->so_pcb;
6079 	/* pickup the assoc we are reading from */
6080 	if (inp == NULL) {
6081 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6082 		return (EINVAL);
6083 	}
6084 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6085 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6086 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6087 	    (controlp == NULL)) {
6088 		/* user does not want the sndrcv ctl */
6089 		filling_sinfo = 0;
6090 	}
6091 	if (psa) {
6092 		from = (struct sockaddr *)sockbuf;
6093 		fromlen = sizeof(sockbuf);
6094 		from->sa_len = 0;
6095 	} else {
6096 		from = NULL;
6097 		fromlen = 0;
6098 	}
6099 
6100 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6101 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6102 	if ((controlp) && (filling_sinfo)) {
6103 		/* copy back the sinfo in a CMSG format */
6104 		if (filling_sinfo)
6105 			*controlp = sctp_build_ctl_nchunk(inp,
6106 			    (struct sctp_sndrcvinfo *)&sinfo);
6107 		else
6108 			*controlp = NULL;
6109 	}
6110 	if (psa) {
6111 		/* copy back the address info */
6112 		if (from && from->sa_len) {
6113 			*psa = sodupsockaddr(from, M_NOWAIT);
6114 		} else {
6115 			*psa = NULL;
6116 		}
6117 	}
6118 	return (error);
6119 }
6120 
6121 
6122 
6123 
6124 
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add the "totaddr" packed sockaddrs starting at "addr" as remote
	 * addresses of the association.  Returns the number successfully
	 * added.  On any failure *error is set (EINVAL/ENOBUFS) and the
	 * association has already been freed via sctp_free_assoc() --
	 * callers must not touch stcb afterwards.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast peers */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast peers */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown family is skipped, but
			 * "incr" keeps its previous value (0 on the first
			 * iteration), so "sa" may not advance past the
			 * unknown entry -- confirm callers pre-validate the
			 * address families in the list.
			 */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6200 
6201 struct sctp_tcb *
6202 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6203     int *totaddr, int *num_v4, int *num_v6, int *error,
6204     int limit, int *bad_addr)
6205 {
6206 	struct sockaddr *sa;
6207 	struct sctp_tcb *stcb = NULL;
6208 	size_t incr, at, i;
6209 
6210 	at = incr = 0;
6211 	sa = addr;
6212 
6213 	*error = *num_v6 = *num_v4 = 0;
6214 	/* account and validate addresses */
6215 	for (i = 0; i < (size_t)*totaddr; i++) {
6216 		switch (sa->sa_family) {
6217 #ifdef INET
6218 		case AF_INET:
6219 			(*num_v4) += 1;
6220 			incr = sizeof(struct sockaddr_in);
6221 			if (sa->sa_len != incr) {
6222 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6223 				*error = EINVAL;
6224 				*bad_addr = 1;
6225 				return (NULL);
6226 			}
6227 			break;
6228 #endif
6229 #ifdef INET6
6230 		case AF_INET6:
6231 			{
6232 				struct sockaddr_in6 *sin6;
6233 
6234 				sin6 = (struct sockaddr_in6 *)sa;
6235 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6236 					/* Must be non-mapped for connectx */
6237 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6238 					*error = EINVAL;
6239 					*bad_addr = 1;
6240 					return (NULL);
6241 				}
6242 				(*num_v6) += 1;
6243 				incr = sizeof(struct sockaddr_in6);
6244 				if (sa->sa_len != incr) {
6245 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6246 					*error = EINVAL;
6247 					*bad_addr = 1;
6248 					return (NULL);
6249 				}
6250 				break;
6251 			}
6252 #endif
6253 		default:
6254 			*totaddr = i;
6255 			/* we are done */
6256 			break;
6257 		}
6258 		if (i == (size_t)*totaddr) {
6259 			break;
6260 		}
6261 		SCTP_INP_INCR_REF(inp);
6262 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6263 		if (stcb != NULL) {
6264 			/* Already have or am bring up an association */
6265 			return (stcb);
6266 		} else {
6267 			SCTP_INP_DECR_REF(inp);
6268 		}
6269 		if ((at + incr) > (size_t)limit) {
6270 			*totaddr = i;
6271 			break;
6272 		}
6273 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6274 	}
6275 	return ((struct sctp_tcb *)NULL);
6276 }
6277 
6278 /*
6279  * sctp_bindx(ADD) for one address.
6280  * assumes all arguments are valid/checked by caller.
6281  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * sctp_bindx(ADD) for one address: validate the address against
	 * the endpoint's address family flags and add it to the bound
	 * address set.  On failure *error is set; on success it is left
	 * untouched (caller initialized).
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* length must match a sockaddr_in6 exactly */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* operate on the embedded IPv4 address from here on */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* endpoint not yet bound at all: this is a plain bind() */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* is some other endpoint already bound to this address? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* clear the port so mgmt acts on the address only */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6405 
6406 /*
6407  * sctp_bindx(DELETE) for one address.
6408  * assumes all arguments are valid/checked by caller.
6409  */
6410 void
6411 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6412     struct sockaddr *sa, sctp_assoc_t assoc_id,
6413     uint32_t vrf_id, int *error)
6414 {
6415 	struct sockaddr *addr_touse;
6416 
6417 #ifdef INET6
6418 	struct sockaddr_in sin;
6419 
6420 #endif
6421 
6422 	/* see if we're bound all already! */
6423 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6424 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6425 		*error = EINVAL;
6426 		return;
6427 	}
6428 	addr_touse = sa;
6429 #if defined(INET6)
6430 	if (sa->sa_family == AF_INET6) {
6431 		struct sockaddr_in6 *sin6;
6432 
6433 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6434 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6435 			*error = EINVAL;
6436 			return;
6437 		}
6438 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6439 			/* can only bind v6 on PF_INET6 sockets */
6440 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6441 			*error = EINVAL;
6442 			return;
6443 		}
6444 		sin6 = (struct sockaddr_in6 *)addr_touse;
6445 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6446 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6447 			    SCTP_IPV6_V6ONLY(inp)) {
6448 				/* can't bind mapped-v4 on PF_INET sockets */
6449 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6450 				*error = EINVAL;
6451 				return;
6452 			}
6453 			in6_sin6_2_sin(&sin, sin6);
6454 			addr_touse = (struct sockaddr *)&sin;
6455 		}
6456 	}
6457 #endif
6458 #ifdef INET
6459 	if (sa->sa_family == AF_INET) {
6460 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6461 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6462 			*error = EINVAL;
6463 			return;
6464 		}
6465 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6466 		    SCTP_IPV6_V6ONLY(inp)) {
6467 			/* can't bind v4 on PF_INET sockets */
6468 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6469 			*error = EINVAL;
6470 			return;
6471 		}
6472 	}
6473 #endif
6474 	/*
6475 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6476 	 * below is ever changed we may need to lock before calling
6477 	 * association level binding.
6478 	 */
6479 	if (assoc_id == 0) {
6480 		/* delete the address */
6481 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6482 		    SCTP_DEL_IP_ADDRESS,
6483 		    vrf_id, NULL);
6484 	} else {
6485 		/*
6486 		 * FIX: decide whether we allow assoc based bindx
6487 		 */
6488 	}
6489 }
6490 
6491 /*
6492  * returns the valid local address count for an assoc, taking into account
6493  * all scoping rules
6494  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Return the number of valid local addresses for the association,
	 * applying the association's negotiated address scopes.
	 */
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			/* a v6 socket not in v6-only mode also takes v4 */
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		/* only the endpoint's explicit bound-address list applies */
		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6628 
6629 #if defined(SCTP_LOCAL_TRACE_BUF)
6630 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Record one entry in the global SCTP trace ring buffer.  A slot
	 * is reserved lock-free by a compare-and-swap on the shared index;
	 * the entry itself is then filled in without further locking.
	 */
	uint32_t saveindex, newindex;

	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			/* wrap: the next writer continues at slot 1 */
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		/* we reserved the wrap; our entry goes into slot 0 */
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6656 
6657 #endif
6658 /* XXX: Remove the #ifdef after tunneling over IPv6 works also on FreeBSD. */
6659 #ifdef INET
6660 /* We will need to add support
6661  * to bind the ports and such here
6662  * so we can do UDP tunneling. In
6663  * the mean-time, we return error
6664  */
6665 #include <netinet/udp.h>
6666 #include <netinet/udp_var.h>
6667 #include <sys/proc.h>
6668 #ifdef INET6
6669 #include <netinet6/sctp6_var.h>
6670 #endif
6671 
6672 static void
6673 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6674 {
6675 	struct ip *iph;
6676 	struct mbuf *sp, *last;
6677 	struct udphdr *uhdr;
6678 	uint16_t port = 0;
6679 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6680 
6681 	/*
6682 	 * Split out the mbuf chain. Leave the IP header in m, place the
6683 	 * rest in the sp.
6684 	 */
6685 	if ((m->m_flags & M_PKTHDR) == 0) {
6686 		/* Can't handle one that is not a pkt hdr */
6687 		goto out;
6688 	}
6689 	/* pull the src port */
6690 	iph = mtod(m, struct ip *);
6691 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6692 
6693 	port = uhdr->uh_sport;
6694 	sp = m_split(m, off, M_DONTWAIT);
6695 	if (sp == NULL) {
6696 		/* Gak, drop packet, we can't do a split */
6697 		goto out;
6698 	}
6699 	if (sp->m_pkthdr.len < header_size) {
6700 		/* Gak, packet can't have an SCTP header in it - to small */
6701 		m_freem(sp);
6702 		goto out;
6703 	}
6704 	/* ok now pull up the UDP header and SCTP header together */
6705 	sp = m_pullup(sp, header_size);
6706 	if (sp == NULL) {
6707 		/* Gak pullup failed */
6708 		goto out;
6709 	}
6710 	/* trim out the UDP header */
6711 	m_adj(sp, sizeof(struct udphdr));
6712 
6713 	/* Now reconstruct the mbuf chain */
6714 	/* 1) find last one */
6715 	last = m;
6716 	while (last->m_next != NULL) {
6717 		last = last->m_next;
6718 	}
6719 	last->m_next = sp;
6720 	m->m_pkthdr.len += sp->m_pkthdr.len;
6721 	last = m;
6722 	while (last != NULL) {
6723 		last = last->m_next;
6724 	}
6725 	/* Now its ready for sctp_input or sctp6_input */
6726 	iph = mtod(m, struct ip *);
6727 	switch (iph->ip_v) {
6728 #ifdef INET
6729 	case IPVERSION:
6730 		{
6731 			uint16_t len;
6732 
6733 			/* its IPv4 */
6734 			len = SCTP_GET_IPV4_LENGTH(iph);
6735 			len -= sizeof(struct udphdr);
6736 			SCTP_GET_IPV4_LENGTH(iph) = len;
6737 			sctp_input_with_port(m, off, port);
6738 			break;
6739 		}
6740 #endif
6741 #ifdef INET6
6742 	case IPV6_VERSION >> 4:
6743 		{
6744 			/* its IPv6 - NOT supported */
6745 			goto out;
6746 			break;
6747 
6748 		}
6749 #endif
6750 	default:
6751 		{
6752 			m_freem(m);
6753 			break;
6754 		}
6755 	}
6756 	return;
6757 out:
6758 	m_freem(m);
6759 }
6760 
6761 void
6762 sctp_over_udp_stop(void)
6763 {
6764 	struct socket *sop;
6765 
6766 	/*
6767 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6768 	 * for writting!
6769 	 */
6770 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6771 		/* Nothing to do */
6772 		return;
6773 	}
6774 	sop = SCTP_BASE_INFO(udp_tun_socket);
6775 	soclose(sop);
6776 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6777 }
6778 
int
sctp_over_udp_start(void)
{
	/*
	 * Create and bind the kernel UDP socket that receives
	 * UDP-encapsulated SCTP packets.  Returns 0 on success or an
	 * errno value.
	 */
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up, we can't get the port */
exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_udp_tunneled_packet().
	 */
	return (0);
}
6832 
6833 #endif
6834