xref: /freebsd/sys/netinet/sctputil.c (revision 675be9115aae86ad6b3d877155d4fd7822892105)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *   this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *   the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #endif
45 #include <netinet/sctp_header.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_uio.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_bsd_addr.h>
53 
54 
55 #ifndef KTR_SCTP
56 #define KTR_SCTP KTR_SUBSYS
57 #endif
58 
59 extern struct sctp_cc_functions sctp_cc_functions[];
60 extern struct sctp_ss_functions sctp_ss_functions[];
61 
62 void
63 sctp_sblog(struct sockbuf *sb,
64     struct sctp_tcb *stcb, int from, int incr)
65 {
66 	struct sctp_cwnd_log sctp_clog;
67 
68 	sctp_clog.x.sb.stcb = stcb;
69 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
70 	if (stcb)
71 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
72 	else
73 		sctp_clog.x.sb.stcb_sbcc = 0;
74 	sctp_clog.x.sb.incr = incr;
75 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
76 	    SCTP_LOG_EVENT_SB,
77 	    from,
78 	    sctp_clog.x.misc.log1,
79 	    sctp_clog.x.misc.log2,
80 	    sctp_clog.x.misc.log3,
81 	    sctp_clog.x.misc.log4);
82 }
83 
84 void
85 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
86 {
87 	struct sctp_cwnd_log sctp_clog;
88 
89 	sctp_clog.x.close.inp = (void *)inp;
90 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
91 	if (stcb) {
92 		sctp_clog.x.close.stcb = (void *)stcb;
93 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
94 	} else {
95 		sctp_clog.x.close.stcb = 0;
96 		sctp_clog.x.close.state = 0;
97 	}
98 	sctp_clog.x.close.loc = loc;
99 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
100 	    SCTP_LOG_EVENT_CLOSE,
101 	    0,
102 	    sctp_clog.x.misc.log1,
103 	    sctp_clog.x.misc.log2,
104 	    sctp_clog.x.misc.log3,
105 	    sctp_clog.x.misc.log4);
106 }
107 
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 
125 }
126 
127 void
128 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
129 {
130 	struct sctp_cwnd_log sctp_clog;
131 
132 	sctp_clog.x.strlog.stcb = stcb;
133 	sctp_clog.x.strlog.n_tsn = tsn;
134 	sctp_clog.x.strlog.n_sseq = sseq;
135 	sctp_clog.x.strlog.e_tsn = 0;
136 	sctp_clog.x.strlog.e_sseq = 0;
137 	sctp_clog.x.strlog.strm = stream;
138 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
139 	    SCTP_LOG_EVENT_STRM,
140 	    from,
141 	    sctp_clog.x.misc.log1,
142 	    sctp_clog.x.misc.log2,
143 	    sctp_clog.x.misc.log3,
144 	    sctp_clog.x.misc.log4);
145 
146 }
147 
148 void
149 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
150 {
151 	struct sctp_cwnd_log sctp_clog;
152 
153 	sctp_clog.x.nagle.stcb = (void *)stcb;
154 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
155 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
156 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
157 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
158 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
159 	    SCTP_LOG_EVENT_NAGLE,
160 	    action,
161 	    sctp_clog.x.misc.log1,
162 	    sctp_clog.x.misc.log2,
163 	    sctp_clog.x.misc.log3,
164 	    sctp_clog.x.misc.log4);
165 }
166 
167 
168 void
169 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
170 {
171 	struct sctp_cwnd_log sctp_clog;
172 
173 	sctp_clog.x.sack.cumack = cumack;
174 	sctp_clog.x.sack.oldcumack = old_cumack;
175 	sctp_clog.x.sack.tsn = tsn;
176 	sctp_clog.x.sack.numGaps = gaps;
177 	sctp_clog.x.sack.numDups = dups;
178 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
179 	    SCTP_LOG_EVENT_SACK,
180 	    from,
181 	    sctp_clog.x.misc.log1,
182 	    sctp_clog.x.misc.log2,
183 	    sctp_clog.x.misc.log3,
184 	    sctp_clog.x.misc.log4);
185 }
186 
187 void
188 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
189 {
190 	struct sctp_cwnd_log sctp_clog;
191 
192 	memset(&sctp_clog, 0, sizeof(sctp_clog));
193 	sctp_clog.x.map.base = map;
194 	sctp_clog.x.map.cum = cum;
195 	sctp_clog.x.map.high = high;
196 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
197 	    SCTP_LOG_EVENT_MAP,
198 	    from,
199 	    sctp_clog.x.misc.log1,
200 	    sctp_clog.x.misc.log2,
201 	    sctp_clog.x.misc.log3,
202 	    sctp_clog.x.misc.log4);
203 }
204 
205 void
206 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
207     int from)
208 {
209 	struct sctp_cwnd_log sctp_clog;
210 
211 	memset(&sctp_clog, 0, sizeof(sctp_clog));
212 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
213 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
214 	sctp_clog.x.fr.tsn = tsn;
215 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
216 	    SCTP_LOG_EVENT_FR,
217 	    from,
218 	    sctp_clog.x.misc.log1,
219 	    sctp_clog.x.misc.log2,
220 	    sctp_clog.x.misc.log3,
221 	    sctp_clog.x.misc.log4);
222 
223 }
224 
225 
226 void
227 sctp_log_mb(struct mbuf *m, int from)
228 {
229 	struct sctp_cwnd_log sctp_clog;
230 
231 	sctp_clog.x.mb.mp = m;
232 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
233 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
234 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
235 	if (SCTP_BUF_IS_EXTENDED(m)) {
236 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
237 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
238 	} else {
239 		sctp_clog.x.mb.ext = 0;
240 		sctp_clog.x.mb.refcnt = 0;
241 	}
242 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
243 	    SCTP_LOG_EVENT_MBUF,
244 	    from,
245 	    sctp_clog.x.misc.log1,
246 	    sctp_clog.x.misc.log2,
247 	    sctp_clog.x.misc.log3,
248 	    sctp_clog.x.misc.log4);
249 }
250 
251 
252 void
253 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
254     int from)
255 {
256 	struct sctp_cwnd_log sctp_clog;
257 
258 	if (control == NULL) {
259 		SCTP_PRINTF("Gak log of NULL?\n");
260 		return;
261 	}
262 	sctp_clog.x.strlog.stcb = control->stcb;
263 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
264 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
265 	sctp_clog.x.strlog.strm = control->sinfo_stream;
266 	if (poschk != NULL) {
267 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
268 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
269 	} else {
270 		sctp_clog.x.strlog.e_tsn = 0;
271 		sctp_clog.x.strlog.e_sseq = 0;
272 	}
273 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
274 	    SCTP_LOG_EVENT_STRM,
275 	    from,
276 	    sctp_clog.x.misc.log1,
277 	    sctp_clog.x.misc.log2,
278 	    sctp_clog.x.misc.log3,
279 	    sctp_clog.x.misc.log4);
280 
281 }
282 
283 void
284 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
285 {
286 	struct sctp_cwnd_log sctp_clog;
287 
288 	sctp_clog.x.cwnd.net = net;
289 	if (stcb->asoc.send_queue_cnt > 255)
290 		sctp_clog.x.cwnd.cnt_in_send = 255;
291 	else
292 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
293 	if (stcb->asoc.stream_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_str = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
297 
298 	if (net) {
299 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
300 		sctp_clog.x.cwnd.inflight = net->flight_size;
301 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
302 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
303 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
304 	}
305 	if (SCTP_CWNDLOG_PRESEND == from) {
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
307 	}
308 	sctp_clog.x.cwnd.cwnd_augment = augment;
309 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
310 	    SCTP_LOG_EVENT_CWND,
311 	    from,
312 	    sctp_clog.x.misc.log1,
313 	    sctp_clog.x.misc.log2,
314 	    sctp_clog.x.misc.log3,
315 	    sctp_clog.x.misc.log4);
316 
317 }
318 
319 void
320 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
321 {
322 	struct sctp_cwnd_log sctp_clog;
323 
324 	memset(&sctp_clog, 0, sizeof(sctp_clog));
325 	if (inp) {
326 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
327 
328 	} else {
329 		sctp_clog.x.lock.sock = (void *)NULL;
330 	}
331 	sctp_clog.x.lock.inp = (void *)inp;
332 	if (stcb) {
333 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
334 	} else {
335 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
336 	}
337 	if (inp) {
338 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
339 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
340 	} else {
341 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
342 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
343 	}
344 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
345 	if (inp && (inp->sctp_socket)) {
346 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
347 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
348 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
349 	} else {
350 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
351 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
352 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
353 	}
354 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
355 	    SCTP_LOG_LOCK_EVENT,
356 	    from,
357 	    sctp_clog.x.misc.log1,
358 	    sctp_clog.x.misc.log2,
359 	    sctp_clog.x.misc.log3,
360 	    sctp_clog.x.misc.log4);
361 
362 }
363 
364 void
365 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
366 {
367 	struct sctp_cwnd_log sctp_clog;
368 
369 	memset(&sctp_clog, 0, sizeof(sctp_clog));
370 	sctp_clog.x.cwnd.net = net;
371 	sctp_clog.x.cwnd.cwnd_new_value = error;
372 	sctp_clog.x.cwnd.inflight = net->flight_size;
373 	sctp_clog.x.cwnd.cwnd_augment = burst;
374 	if (stcb->asoc.send_queue_cnt > 255)
375 		sctp_clog.x.cwnd.cnt_in_send = 255;
376 	else
377 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
378 	if (stcb->asoc.stream_queue_cnt > 255)
379 		sctp_clog.x.cwnd.cnt_in_str = 255;
380 	else
381 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
382 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
383 	    SCTP_LOG_EVENT_MAXBURST,
384 	    from,
385 	    sctp_clog.x.misc.log1,
386 	    sctp_clog.x.misc.log2,
387 	    sctp_clog.x.misc.log3,
388 	    sctp_clog.x.misc.log4);
389 
390 }
391 
392 void
393 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
394 {
395 	struct sctp_cwnd_log sctp_clog;
396 
397 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
398 	sctp_clog.x.rwnd.send_size = snd_size;
399 	sctp_clog.x.rwnd.overhead = overhead;
400 	sctp_clog.x.rwnd.new_rwnd = 0;
401 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
402 	    SCTP_LOG_EVENT_RWND,
403 	    from,
404 	    sctp_clog.x.misc.log1,
405 	    sctp_clog.x.misc.log2,
406 	    sctp_clog.x.misc.log3,
407 	    sctp_clog.x.misc.log4);
408 }
409 
410 void
411 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
412 {
413 	struct sctp_cwnd_log sctp_clog;
414 
415 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
416 	sctp_clog.x.rwnd.send_size = flight_size;
417 	sctp_clog.x.rwnd.overhead = overhead;
418 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
419 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
420 	    SCTP_LOG_EVENT_RWND,
421 	    from,
422 	    sctp_clog.x.misc.log1,
423 	    sctp_clog.x.misc.log2,
424 	    sctp_clog.x.misc.log3,
425 	    sctp_clog.x.misc.log4);
426 }
427 
428 void
429 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
430 {
431 	struct sctp_cwnd_log sctp_clog;
432 
433 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
434 	sctp_clog.x.mbcnt.size_change = book;
435 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
436 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_EVENT_MBCNT,
439 	    from,
440 	    sctp_clog.x.misc.log1,
441 	    sctp_clog.x.misc.log2,
442 	    sctp_clog.x.misc.log3,
443 	    sctp_clog.x.misc.log4);
444 
445 }
446 
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	/*
	 * Emit four caller-chosen 32-bit values directly into the KTR
	 * trace under the misc-event tag; 'from' identifies the call site.
	 */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
455 
456 void
457 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
458 {
459 	struct sctp_cwnd_log sctp_clog;
460 
461 	sctp_clog.x.wake.stcb = (void *)stcb;
462 	sctp_clog.x.wake.wake_cnt = wake_cnt;
463 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
464 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
465 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
466 
467 	if (stcb->asoc.stream_queue_cnt < 0xff)
468 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
469 	else
470 		sctp_clog.x.wake.stream_qcnt = 0xff;
471 
472 	if (stcb->asoc.chunks_on_out_queue < 0xff)
473 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
474 	else
475 		sctp_clog.x.wake.chunks_on_oque = 0xff;
476 
477 	sctp_clog.x.wake.sctpflags = 0;
478 	/* set in the defered mode stuff */
479 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
480 		sctp_clog.x.wake.sctpflags |= 1;
481 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
482 		sctp_clog.x.wake.sctpflags |= 2;
483 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
484 		sctp_clog.x.wake.sctpflags |= 4;
485 	/* what about the sb */
486 	if (stcb->sctp_socket) {
487 		struct socket *so = stcb->sctp_socket;
488 
489 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
490 	} else {
491 		sctp_clog.x.wake.sbflags = 0xff;
492 	}
493 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
494 	    SCTP_LOG_EVENT_WAKE,
495 	    from,
496 	    sctp_clog.x.misc.log1,
497 	    sctp_clog.x.misc.log2,
498 	    sctp_clog.x.misc.log3,
499 	    sctp_clog.x.misc.log4);
500 
501 }
502 
503 void
504 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
505 {
506 	struct sctp_cwnd_log sctp_clog;
507 
508 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
509 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
510 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
511 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
512 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
513 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
514 	sctp_clog.x.blk.sndlen = sendlen;
515 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
516 	    SCTP_LOG_EVENT_BLOCK,
517 	    from,
518 	    sctp_clog.x.misc.log1,
519 	    sctp_clog.x.misc.log2,
520 	    sctp_clog.x.misc.log3,
521 	    sctp_clog.x.misc.log4);
522 
523 }
524 
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/*
	 * Stub: statistics are expected to be recovered from the KTR
	 * trace (e.g. via ktrdump) rather than through this socket-option
	 * path; optval/optsize are ignored and success is returned.
	 */
	/* May need to fix this if ktrdump does not work */
	return (0);
}
531 
532 #ifdef SCTP_AUDITING_ENABLED
533 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
534 static int sctp_audit_indx = 0;
535 
536 static
537 void
538 sctp_print_audit_report(void)
539 {
540 	int i;
541 	int cnt;
542 
543 	cnt = 0;
544 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
545 		if ((sctp_audit_data[i][0] == 0xe0) &&
546 		    (sctp_audit_data[i][1] == 0x01)) {
547 			cnt = 0;
548 			SCTP_PRINTF("\n");
549 		} else if (sctp_audit_data[i][0] == 0xf0) {
550 			cnt = 0;
551 			SCTP_PRINTF("\n");
552 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
553 		    (sctp_audit_data[i][1] == 0x01)) {
554 			SCTP_PRINTF("\n");
555 			cnt = 0;
556 		}
557 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
558 		    (uint32_t) sctp_audit_data[i][1]);
559 		cnt++;
560 		if ((cnt % 14) == 0)
561 			SCTP_PRINTF("\n");
562 	}
563 	for (i = 0; i < sctp_audit_indx; i++) {
564 		if ((sctp_audit_data[i][0] == 0xe0) &&
565 		    (sctp_audit_data[i][1] == 0x01)) {
566 			cnt = 0;
567 			SCTP_PRINTF("\n");
568 		} else if (sctp_audit_data[i][0] == 0xf0) {
569 			cnt = 0;
570 			SCTP_PRINTF("\n");
571 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
572 		    (sctp_audit_data[i][1] == 0x01)) {
573 			SCTP_PRINTF("\n");
574 			cnt = 0;
575 		}
576 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
577 		    (uint32_t) sctp_audit_data[i][1]);
578 		cnt++;
579 		if ((cnt % 14) == 0)
580 			SCTP_PRINTF("\n");
581 	}
582 	SCTP_PRINTF("\n");
583 }
584 
/*
 * Consistency audit of an association's retransmission bookkeeping.
 * Recomputes sent_queue_retran_cnt, total_flight, total_flight_count
 * and each destination's flight_size directly from the sent queue,
 * records any discrepancy in the circular audit buffer (and corrects
 * the cached value), and prints a full audit report if anything was
 * wrong.  The 'net' parameter is currently unused here.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA marks the start of an audit pass; low byte = caller id. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: audit requested with no endpoint. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: audit requested with no association. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: the cached retransmit count we are about to verify. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Walk the sent queue once, recomputing the retransmit-pending
	 * count and the booked bytes/chunks still counted as in flight.
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: cached retran count was wrong; log and fix. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		/* 0xA2: the corrected value, for the report. */
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total_flight out of sync with booked bytes. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: in-flight chunk count out of sync. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: per-net flight sizes must sum to total_flight. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/*
		 * 0xAF/0xA3: per-net totals disagree; rebuild each net's
		 * flight_size from the chunks on the sent queue that are
		 * still outbound to it.
		 */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
714 
715 void
716 sctp_audit_log(uint8_t ev, uint8_t fd)
717 {
718 
719 	sctp_audit_data[sctp_audit_indx][0] = ev;
720 	sctp_audit_data[sctp_audit_indx][1] = fd;
721 	sctp_audit_indx++;
722 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
723 		sctp_audit_indx = 0;
724 	}
725 }
726 
727 #endif
728 
729 /*
730  * sctp_stop_timers_for_shutdown() should be called
731  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
732  * state to make sure that all timers are stopped.
733  */
734 void
735 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
736 {
737 	struct sctp_association *asoc;
738 	struct sctp_nets *net;
739 
740 	asoc = &stcb->asoc;
741 
742 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
743 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
744 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
745 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
746 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
747 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
748 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
749 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
750 	}
751 }
752 
753 /*
754  * a list of sizes based on typical mtu's, used only if next hop size not
755  * returned.
756  */
/*
 * Table of typical link MTUs.  Must remain sorted in ascending order:
 * sctp_get_prev_mtu() and sctp_get_next_mtu() scan it linearly and
 * rely on the ordering to find the neighboring entry.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,			/* Ethernet */
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
777 
778 /*
779  * Return the largest MTU smaller than val. If there is no
780  * entry, just return val.
781  */
782 uint32_t
783 sctp_get_prev_mtu(uint32_t val)
784 {
785 	uint32_t i;
786 
787 	if (val <= sctp_mtu_sizes[0]) {
788 		return (val);
789 	}
790 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
791 		if (val <= sctp_mtu_sizes[i]) {
792 			break;
793 		}
794 	}
795 	return (sctp_mtu_sizes[i - 1]);
796 }
797 
798 /*
799  * Return the smallest MTU larger than val. If there is no
800  * entry, just return val.
801  */
802 uint32_t
803 sctp_get_next_mtu(struct sctp_inpcb *inp, uint32_t val)
804 {
805 	/* select another MTU that is just bigger than this one */
806 	uint32_t i;
807 
808 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
809 		if (val < sctp_mtu_sizes[i]) {
810 			return (sctp_mtu_sizes[i]);
811 		}
812 	}
813 	return (val);
814 }
815 
/*
 * Refill the endpoint's random_store by HMAC-ing the endpoint's random
 * seed with a monotonically increasing counter, and reset store_at so
 * sctp_select_initial_TSN() starts handing out the fresh bytes.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
834 
835 uint32_t
836 sctp_select_initial_TSN(struct sctp_pcb *inp)
837 {
838 	/*
839 	 * A true implementation should use random selection process to get
840 	 * the initial stream sequence number, using RFC1750 as a good
841 	 * guideline
842 	 */
843 	uint32_t x, *xp;
844 	uint8_t *p;
845 	int store_at, new_store;
846 
847 	if (inp->initial_sequence_debug != 0) {
848 		uint32_t ret;
849 
850 		ret = inp->initial_sequence_debug;
851 		inp->initial_sequence_debug++;
852 		return (ret);
853 	}
854 retry:
855 	store_at = inp->store_at;
856 	new_store = store_at + sizeof(uint32_t);
857 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
858 		new_store = 0;
859 	}
860 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
861 		goto retry;
862 	}
863 	if (new_store == 0) {
864 		/* Refill the random store */
865 		sctp_fill_random_store(inp);
866 	}
867 	p = &inp->random_store[store_at];
868 	xp = (uint32_t *) p;
869 	x = *xp;
870 	return (x);
871 }
872 
873 uint32_t
874 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
875 {
876 	uint32_t x, not_done;
877 	struct timeval now;
878 
879 	(void)SCTP_GETTIME_TIMEVAL(&now);
880 	not_done = 1;
881 	while (not_done) {
882 		x = sctp_select_initial_TSN(&inp->sctp_ep);
883 		if (x == 0) {
884 			/* we never use 0 */
885 			continue;
886 		}
887 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
888 			not_done = 0;
889 		}
890 	}
891 	return (x);
892 }
893 
894 int
895 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
896     uint32_t override_tag, uint32_t vrf_id)
897 {
898 	struct sctp_association *asoc;
899 
900 	/*
901 	 * Anything set to zero is taken care of by the allocation routine's
902 	 * bzero
903 	 */
904 
905 	/*
906 	 * Up front select what scoping to apply on addresses I tell my peer
907 	 * Not sure what to do with these right now, we will need to come up
908 	 * with a way to set them. We may need to pass them through from the
909 	 * caller in the sctp_aloc_assoc() function.
910 	 */
911 	int i;
912 
913 	asoc = &stcb->asoc;
914 	/* init all variables to a known value. */
915 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
916 	asoc->max_burst = m->sctp_ep.max_burst;
917 	asoc->fr_max_burst = m->sctp_ep.fr_max_burst;
918 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
919 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
920 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
921 	asoc->ecn_allowed = m->sctp_ecn_enable;
922 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
923 	asoc->sctp_cmt_pf = (uint8_t) 0;
924 	asoc->sctp_frag_point = m->sctp_frag_point;
925 	asoc->sctp_features = m->sctp_features;
926 	asoc->default_dscp = m->sctp_ep.default_dscp;
927 #ifdef INET6
928 	if (m->sctp_ep.default_flowlabel) {
929 		asoc->default_flowlabel = m->sctp_ep.default_flowlabel;
930 	} else {
931 		if (m->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
932 			asoc->default_flowlabel = sctp_select_initial_TSN(&m->sctp_ep);
933 			asoc->default_flowlabel &= 0x000fffff;
934 			asoc->default_flowlabel |= 0x80000000;
935 		} else {
936 			asoc->default_flowlabel = 0;
937 		}
938 	}
939 #endif
940 	asoc->sb_send_resv = 0;
941 	if (override_tag) {
942 		asoc->my_vtag = override_tag;
943 	} else {
944 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
945 	}
946 	/* Get the nonce tags */
947 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
948 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
949 	asoc->vrf_id = vrf_id;
950 
951 #ifdef SCTP_ASOCLOG_OF_TSNS
952 	asoc->tsn_in_at = 0;
953 	asoc->tsn_out_at = 0;
954 	asoc->tsn_in_wrapped = 0;
955 	asoc->tsn_out_wrapped = 0;
956 	asoc->cumack_log_at = 0;
957 	asoc->cumack_log_atsnt = 0;
958 #endif
959 #ifdef SCTP_FS_SPEC_LOG
960 	asoc->fs_index = 0;
961 #endif
962 	asoc->refcnt = 0;
963 	asoc->assoc_up_sent = 0;
964 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
965 	    sctp_select_initial_TSN(&m->sctp_ep);
966 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
967 	/* we are optimisitic here */
968 	asoc->peer_supports_pktdrop = 1;
969 	asoc->peer_supports_nat = 0;
970 	asoc->sent_queue_retran_cnt = 0;
971 
972 	/* for CMT */
973 	asoc->last_net_cmt_send_started = NULL;
974 
975 	/* This will need to be adjusted */
976 	asoc->last_acked_seq = asoc->init_seq_number - 1;
977 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
978 	asoc->asconf_seq_in = asoc->last_acked_seq;
979 
980 	/* here we are different, we hold the next one we expect */
981 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
982 
983 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
984 	asoc->initial_rto = m->sctp_ep.initial_rto;
985 
986 	asoc->max_init_times = m->sctp_ep.max_init_times;
987 	asoc->max_send_times = m->sctp_ep.max_send_times;
988 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
989 	asoc->def_net_pf_threshold = m->sctp_ep.def_net_pf_threshold;
990 	asoc->free_chunk_cnt = 0;
991 
992 	asoc->iam_blocking = 0;
993 
994 	asoc->context = m->sctp_context;
995 	asoc->def_send = m->def_send;
996 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
997 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
998 	asoc->pr_sctp_cnt = 0;
999 	asoc->total_output_queue_size = 0;
1000 
1001 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1002 		struct in6pcb *inp6;
1003 
1004 		/* Its a V6 socket */
1005 		inp6 = (struct in6pcb *)m;
1006 		asoc->ipv6_addr_legal = 1;
1007 		/* Now look at the binding flag to see if V4 will be legal */
1008 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1009 			asoc->ipv4_addr_legal = 1;
1010 		} else {
1011 			/* V4 addresses are NOT legal on the association */
1012 			asoc->ipv4_addr_legal = 0;
1013 		}
1014 	} else {
1015 		/* Its a V4 socket, no - V6 */
1016 		asoc->ipv4_addr_legal = 1;
1017 		asoc->ipv6_addr_legal = 0;
1018 	}
1019 
1020 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1021 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1022 
1023 	asoc->smallest_mtu = m->sctp_frag_point;
1024 	asoc->minrto = m->sctp_ep.sctp_minrto;
1025 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1026 
1027 	asoc->locked_on_sending = NULL;
1028 	asoc->stream_locked_on = 0;
1029 	asoc->ecn_echo_cnt_onq = 0;
1030 	asoc->stream_locked = 0;
1031 
1032 	asoc->send_sack = 1;
1033 
1034 	LIST_INIT(&asoc->sctp_restricted_addrs);
1035 
1036 	TAILQ_INIT(&asoc->nets);
1037 	TAILQ_INIT(&asoc->pending_reply_queue);
1038 	TAILQ_INIT(&asoc->asconf_ack_sent);
1039 	/* Setup to fill the hb random cache at first HB */
1040 	asoc->hb_random_idx = 4;
1041 
1042 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1043 
1044 	stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
1045 	stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
1046 
1047 	stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
1048 	stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
1049 
1050 	/*
1051 	 * Now the stream parameters, here we allocate space for all streams
1052 	 * that we request by default.
1053 	 */
1054 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1055 	    m->sctp_ep.pre_open_stream_count;
1056 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1057 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1058 	    SCTP_M_STRMO);
1059 	if (asoc->strmout == NULL) {
1060 		/* big trouble no memory */
1061 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1062 		return (ENOMEM);
1063 	}
1064 	for (i = 0; i < asoc->streamoutcnt; i++) {
1065 		/*
1066 		 * inbound side must be set to 0xffff, also NOTE when we get
1067 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1068 		 * count (streamoutcnt) but first check if we sent to any of
1069 		 * the upper streams that were dropped (if some were). Those
1070 		 * that were dropped must be notified to the upper layer as
1071 		 * failed to send.
1072 		 */
1073 		asoc->strmout[i].next_sequence_sent = 0x0;
1074 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1075 		asoc->strmout[i].stream_no = i;
1076 		asoc->strmout[i].last_msg_incomplete = 0;
1077 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1078 	}
1079 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1080 
1081 	/* Now the mapping array */
1082 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1083 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1084 	    SCTP_M_MAP);
1085 	if (asoc->mapping_array == NULL) {
1086 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1087 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1088 		return (ENOMEM);
1089 	}
1090 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1091 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1092 	    SCTP_M_MAP);
1093 	if (asoc->nr_mapping_array == NULL) {
1094 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1095 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1096 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1097 		return (ENOMEM);
1098 	}
1099 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1100 
1101 	/* Now the init of the other outqueues */
1102 	TAILQ_INIT(&asoc->free_chunks);
1103 	TAILQ_INIT(&asoc->control_send_queue);
1104 	TAILQ_INIT(&asoc->asconf_send_queue);
1105 	TAILQ_INIT(&asoc->send_queue);
1106 	TAILQ_INIT(&asoc->sent_queue);
1107 	TAILQ_INIT(&asoc->reasmqueue);
1108 	TAILQ_INIT(&asoc->resetHead);
1109 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1110 	TAILQ_INIT(&asoc->asconf_queue);
1111 	/* authentication fields */
1112 	asoc->authinfo.random = NULL;
1113 	asoc->authinfo.active_keyid = 0;
1114 	asoc->authinfo.assoc_key = NULL;
1115 	asoc->authinfo.assoc_keyid = 0;
1116 	asoc->authinfo.recv_key = NULL;
1117 	asoc->authinfo.recv_keyid = 0;
1118 	LIST_INIT(&asoc->shared_keys);
1119 	asoc->marked_retrans = 0;
1120 	asoc->port = m->sctp_ep.port;
1121 	asoc->timoinit = 0;
1122 	asoc->timodata = 0;
1123 	asoc->timosack = 0;
1124 	asoc->timoshutdown = 0;
1125 	asoc->timoheartbeat = 0;
1126 	asoc->timocookie = 0;
1127 	asoc->timoshutdownack = 0;
1128 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1129 	asoc->discontinuity_time = asoc->start_time;
1130 	/*
1131 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1132 	 * freed later when the association is freed.
1133 	 */
1134 	return (0);
1135 }
1136 
1137 void
1138 sctp_print_mapping_array(struct sctp_association *asoc)
1139 {
1140 	unsigned int i, limit;
1141 
1142 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1143 	    asoc->mapping_array_size,
1144 	    asoc->mapping_array_base_tsn,
1145 	    asoc->cumulative_tsn,
1146 	    asoc->highest_tsn_inside_map,
1147 	    asoc->highest_tsn_inside_nr_map);
1148 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1149 		if (asoc->mapping_array[limit - 1]) {
1150 			break;
1151 		}
1152 	}
1153 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1154 	for (i = 0; i < limit; i++) {
1155 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1156 	}
1157 	if (limit % 16)
1158 		printf("\n");
1159 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1160 		if (asoc->nr_mapping_array[limit - 1]) {
1161 			break;
1162 		}
1163 	}
1164 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1165 	for (i = 0; i < limit; i++) {
1166 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1167 	}
1168 	if (limit % 16)
1169 		printf("\n");
1170 }
1171 
1172 int
1173 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1174 {
1175 	/* mapping array needs to grow */
1176 	uint8_t *new_array1, *new_array2;
1177 	uint32_t new_size;
1178 
1179 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1180 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1181 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1182 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1183 		/* can't get more, forget it */
1184 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1185 		if (new_array1) {
1186 			SCTP_FREE(new_array1, SCTP_M_MAP);
1187 		}
1188 		if (new_array2) {
1189 			SCTP_FREE(new_array2, SCTP_M_MAP);
1190 		}
1191 		return (-1);
1192 	}
1193 	memset(new_array1, 0, new_size);
1194 	memset(new_array2, 0, new_size);
1195 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1196 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1197 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1198 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1199 	asoc->mapping_array = new_array1;
1200 	asoc->nr_mapping_array = new_array2;
1201 	asoc->mapping_array_size = new_size;
1202 	return (0);
1203 }
1204 
1205 
/*
 * Core of the asynchronous PCB/assoc iterator: walk every endpoint (or a
 * single one, with SCTP_ITERATOR_DO_SINGLE_INP) whose flags/features match
 * the iterator's filter, and invoke the registered callbacks:
 *   function_inp      - once per matching endpoint (may return non-zero to
 *                       skip that endpoint's associations),
 *   function_assoc    - once per association in the desired state,
 *   function_inp_end  - after the last association of an endpoint,
 *   function_atend    - once when the whole iteration finishes.
 * The iterator structure itself is freed here when the walk completes.
 *
 * Lock order used throughout: INP-INFO read lock -> iterator lock ->
 * per-INP read lock -> per-TCB lock.  Every goto below is tied to a
 * specific lock state; do not reorder the unlock/relock sequences.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/*
	 * On the first pass it->inp is already read-locked from above;
	 * afterwards each new endpoint must be locked here.
	 */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* Advance to the next endpoint before unlocking this one. */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold the assoc with a refcount while every lock is
			 * briefly released, then reacquire in lock order and
			 * honor any stop request posted in the meantime.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				printf("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1353 
/*
 * Drain the global iterator workqueue, running each queued iterator via
 * sctp_iterator_work() under its own vnet.  The workqueue lock is dropped
 * while an iterator runs and reacquired afterwards, so the list may be
 * appended to concurrently; TAILQ_FOREACH_SAFE's saved "nit" keeps the
 * walk valid across the removal of "it".  iterator_running gates
 * re-entry by other wakeups (presumably checked by the enqueue path --
 * not visible in this file).
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		/* sctp_iterator_work() frees "it" when the walk completes. */
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1377 
1378 
1379 static void
1380 sctp_handle_addr_wq(void)
1381 {
1382 	/* deal with the ADDR wq from the rtsock calls */
1383 	struct sctp_laddr *wi, *nwi;
1384 	struct sctp_asconf_iterator *asc;
1385 
1386 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1387 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1388 	if (asc == NULL) {
1389 		/* Try later, no memory */
1390 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1391 		    (struct sctp_inpcb *)NULL,
1392 		    (struct sctp_tcb *)NULL,
1393 		    (struct sctp_nets *)NULL);
1394 		return;
1395 	}
1396 	LIST_INIT(&asc->list_of_work);
1397 	asc->cnt = 0;
1398 
1399 	SCTP_WQ_ADDR_LOCK();
1400 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1401 		LIST_REMOVE(wi, sctp_nxt_addr);
1402 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1403 		asc->cnt++;
1404 	}
1405 	SCTP_WQ_ADDR_UNLOCK();
1406 
1407 	if (asc->cnt == 0) {
1408 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1409 	} else {
1410 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1411 		    sctp_asconf_iterator_stcb,
1412 		    NULL,	/* No ep end for boundall */
1413 		    SCTP_PCB_FLAGS_BOUNDALL,
1414 		    SCTP_PCB_ANY_FEATURES,
1415 		    SCTP_ASOC_ANY_STATE,
1416 		    (void *)asc, 0,
1417 		    sctp_asconf_iterator_end, NULL, 0);
1418 	}
1419 }
1420 
/*
 * NOTE(review): file-scope scratch variables written by the T3 send-timer
 * path in sctp_timeout_handler() with no synchronization; presumably kept
 * global only for debugger visibility.  Concurrent timer callouts can
 * clobber them -- confirm nothing else reads these before relying on them.
 */
int retcode = 0;
int cur_oerr = 0;
1423 
/*
 * Common callout handler for every SCTP timer type.  "t" is the
 * struct sctp_timer embedded in the endpoint/association/net that armed
 * it; tmr->ep/tcb/net identify the objects the timer refers to (any may
 * be NULL depending on the type).
 *
 * The long prologue validates the timer (self pointer, type, still
 * active), takes an inp reference and a temporary stcb refcount, then
 * locks the TCB before dispatching on tmr->type.  Each early-return path
 * must undo exactly what was acquired before it, which is why the exits
 * funnel through get_out (unlock TCB) -> out_decr (drop inp ref) ->
 * out_no_decr; the ASOCKILL and INPKILL cases free their objects and
 * therefore jump straight to out_no_decr.  tmr->stopped_from is a debug
 * breadcrumb recording how far the handler got.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Every type except ADDR_WQ needs an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * With no socket left, only the teardown/retransmission
		 * related timers listed below are still allowed to run.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the assoc while we inspect it. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped after firing; unwind and bail. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/*
		 * Lock the TCB, then drop the temporary refcount taken
		 * above; the lock now protects the assoc for the dispatch.
		 */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* Re-arm only if HB is still enabled on this destination. */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* Guard expired: abort the association outright. */
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	/* "type" is cached above because inp/stcb may be gone by now. */
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1866 
1867 void
1868 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1869     struct sctp_nets *net)
1870 {
1871 	uint32_t to_ticks;
1872 	struct sctp_timer *tmr;
1873 
1874 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1875 		return;
1876 
1877 	to_ticks = 0;
1878 
1879 	tmr = NULL;
1880 	if (stcb) {
1881 		SCTP_TCB_LOCK_ASSERT(stcb);
1882 	}
1883 	switch (t_type) {
1884 	case SCTP_TIMER_TYPE_ZERO_COPY:
1885 		tmr = &inp->sctp_ep.zero_copy_timer;
1886 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1887 		break;
1888 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1889 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1890 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1891 		break;
1892 	case SCTP_TIMER_TYPE_ADDR_WQ:
1893 		/* Only 1 tick away :-) */
1894 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1895 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1896 		break;
1897 	case SCTP_TIMER_TYPE_SEND:
1898 		/* Here we use the RTO timer */
1899 		{
1900 			int rto_val;
1901 
1902 			if ((stcb == NULL) || (net == NULL)) {
1903 				return;
1904 			}
1905 			tmr = &net->rxt_timer;
1906 			if (net->RTO == 0) {
1907 				rto_val = stcb->asoc.initial_rto;
1908 			} else {
1909 				rto_val = net->RTO;
1910 			}
1911 			to_ticks = MSEC_TO_TICKS(rto_val);
1912 		}
1913 		break;
1914 	case SCTP_TIMER_TYPE_INIT:
1915 		/*
1916 		 * Here we use the INIT timer default usually about 1
1917 		 * minute.
1918 		 */
1919 		if ((stcb == NULL) || (net == NULL)) {
1920 			return;
1921 		}
1922 		tmr = &net->rxt_timer;
1923 		if (net->RTO == 0) {
1924 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1925 		} else {
1926 			to_ticks = MSEC_TO_TICKS(net->RTO);
1927 		}
1928 		break;
1929 	case SCTP_TIMER_TYPE_RECV:
1930 		/*
1931 		 * Here we use the Delayed-Ack timer value from the inp
1932 		 * ususually about 200ms.
1933 		 */
1934 		if (stcb == NULL) {
1935 			return;
1936 		}
1937 		tmr = &stcb->asoc.dack_timer;
1938 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1939 		break;
1940 	case SCTP_TIMER_TYPE_SHUTDOWN:
1941 		/* Here we use the RTO of the destination. */
1942 		if ((stcb == NULL) || (net == NULL)) {
1943 			return;
1944 		}
1945 		if (net->RTO == 0) {
1946 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1947 		} else {
1948 			to_ticks = MSEC_TO_TICKS(net->RTO);
1949 		}
1950 		tmr = &net->rxt_timer;
1951 		break;
1952 	case SCTP_TIMER_TYPE_HEARTBEAT:
1953 		/*
1954 		 * the net is used here so that we can add in the RTO. Even
1955 		 * though we use a different timer. We also add the HB timer
1956 		 * PLUS a random jitter.
1957 		 */
1958 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1959 			return;
1960 		} else {
1961 			uint32_t rndval;
1962 			uint32_t jitter;
1963 
1964 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1965 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1966 				return;
1967 			}
1968 			if (net->RTO == 0) {
1969 				to_ticks = stcb->asoc.initial_rto;
1970 			} else {
1971 				to_ticks = net->RTO;
1972 			}
1973 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1974 			jitter = rndval % to_ticks;
1975 			if (jitter >= (to_ticks >> 1)) {
1976 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1977 			} else {
1978 				to_ticks = to_ticks - jitter;
1979 			}
1980 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1981 			    !(net->dest_state & SCTP_ADDR_PF)) {
1982 				to_ticks += net->heart_beat_delay;
1983 			}
1984 			/*
1985 			 * Now we must convert the to_ticks that are now in
1986 			 * ms to ticks.
1987 			 */
1988 			to_ticks = MSEC_TO_TICKS(to_ticks);
1989 			tmr = &net->hb_timer;
1990 		}
1991 		break;
1992 	case SCTP_TIMER_TYPE_COOKIE:
1993 		/*
1994 		 * Here we can use the RTO timer from the network since one
1995 		 * RTT was compelete. If a retran happened then we will be
1996 		 * using the RTO initial value.
1997 		 */
1998 		if ((stcb == NULL) || (net == NULL)) {
1999 			return;
2000 		}
2001 		if (net->RTO == 0) {
2002 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2003 		} else {
2004 			to_ticks = MSEC_TO_TICKS(net->RTO);
2005 		}
2006 		tmr = &net->rxt_timer;
2007 		break;
2008 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2009 		/*
2010 		 * nothing needed but the endpoint here ususually about 60
2011 		 * minutes.
2012 		 */
2013 		if (inp == NULL) {
2014 			return;
2015 		}
2016 		tmr = &inp->sctp_ep.signature_change;
2017 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2018 		break;
2019 	case SCTP_TIMER_TYPE_ASOCKILL:
2020 		if (stcb == NULL) {
2021 			return;
2022 		}
2023 		tmr = &stcb->asoc.strreset_timer;
2024 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2025 		break;
2026 	case SCTP_TIMER_TYPE_INPKILL:
2027 		/*
2028 		 * The inp is setup to die. We re-use the signature_chage
2029 		 * timer since that has stopped and we are in the GONE
2030 		 * state.
2031 		 */
2032 		if (inp == NULL) {
2033 			return;
2034 		}
2035 		tmr = &inp->sctp_ep.signature_change;
2036 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2037 		break;
2038 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2039 		/*
2040 		 * Here we use the value found in the EP for PMTU ususually
2041 		 * about 10 minutes.
2042 		 */
2043 		if ((stcb == NULL) || (inp == NULL)) {
2044 			return;
2045 		}
2046 		if (net == NULL) {
2047 			return;
2048 		}
2049 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2050 			return;
2051 		}
2052 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2053 		tmr = &net->pmtu_timer;
2054 		break;
2055 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2056 		/* Here we use the RTO of the destination */
2057 		if ((stcb == NULL) || (net == NULL)) {
2058 			return;
2059 		}
2060 		if (net->RTO == 0) {
2061 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2062 		} else {
2063 			to_ticks = MSEC_TO_TICKS(net->RTO);
2064 		}
2065 		tmr = &net->rxt_timer;
2066 		break;
2067 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2068 		/*
2069 		 * Here we use the endpoints shutdown guard timer usually
2070 		 * about 3 minutes.
2071 		 */
2072 		if ((inp == NULL) || (stcb == NULL)) {
2073 			return;
2074 		}
2075 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2076 		tmr = &stcb->asoc.shut_guard_timer;
2077 		break;
2078 	case SCTP_TIMER_TYPE_STRRESET:
2079 		/*
2080 		 * Here the timer comes from the stcb but its value is from
2081 		 * the net's RTO.
2082 		 */
2083 		if ((stcb == NULL) || (net == NULL)) {
2084 			return;
2085 		}
2086 		if (net->RTO == 0) {
2087 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2088 		} else {
2089 			to_ticks = MSEC_TO_TICKS(net->RTO);
2090 		}
2091 		tmr = &stcb->asoc.strreset_timer;
2092 		break;
2093 	case SCTP_TIMER_TYPE_ASCONF:
2094 		/*
2095 		 * Here the timer comes from the stcb but its value is from
2096 		 * the net's RTO.
2097 		 */
2098 		if ((stcb == NULL) || (net == NULL)) {
2099 			return;
2100 		}
2101 		if (net->RTO == 0) {
2102 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2103 		} else {
2104 			to_ticks = MSEC_TO_TICKS(net->RTO);
2105 		}
2106 		tmr = &stcb->asoc.asconf_timer;
2107 		break;
2108 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2109 		if ((stcb == NULL) || (net != NULL)) {
2110 			return;
2111 		}
2112 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2113 		tmr = &stcb->asoc.delete_prim_timer;
2114 		break;
2115 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2116 		if (stcb == NULL) {
2117 			return;
2118 		}
2119 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2120 			/*
2121 			 * Really an error since stcb is NOT set to
2122 			 * autoclose
2123 			 */
2124 			return;
2125 		}
2126 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2127 		tmr = &stcb->asoc.autoclose_timer;
2128 		break;
2129 	default:
2130 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2131 		    __FUNCTION__, t_type);
2132 		return;
2133 		break;
2134 	};
2135 	if ((to_ticks <= 0) || (tmr == NULL)) {
2136 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2137 		    __FUNCTION__, t_type, to_ticks, tmr);
2138 		return;
2139 	}
2140 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2141 		/*
2142 		 * we do NOT allow you to have it already running. if it is
2143 		 * we leave the current one up unchanged
2144 		 */
2145 		return;
2146 	}
2147 	/* At this point we can proceed */
2148 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2149 		stcb->asoc.num_send_timers_up++;
2150 	}
2151 	tmr->stopped_from = 0;
2152 	tmr->type = t_type;
2153 	tmr->ep = (void *)inp;
2154 	tmr->tcb = (void *)stcb;
2155 	tmr->net = (void *)net;
2156 	tmr->self = (void *)tmr;
2157 	tmr->vnet = (void *)curvnet;
2158 	tmr->ticks = sctp_get_tick_count();
2159 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2160 	return;
2161 }
2162 
2163 void
2164 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2165     struct sctp_nets *net, uint32_t from)
2166 {
2167 	struct sctp_timer *tmr;
2168 
2169 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2170 	    (inp == NULL))
2171 		return;
2172 
2173 	tmr = NULL;
2174 	if (stcb) {
2175 		SCTP_TCB_LOCK_ASSERT(stcb);
2176 	}
2177 	switch (t_type) {
2178 	case SCTP_TIMER_TYPE_ZERO_COPY:
2179 		tmr = &inp->sctp_ep.zero_copy_timer;
2180 		break;
2181 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2182 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2183 		break;
2184 	case SCTP_TIMER_TYPE_ADDR_WQ:
2185 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2186 		break;
2187 	case SCTP_TIMER_TYPE_SEND:
2188 		if ((stcb == NULL) || (net == NULL)) {
2189 			return;
2190 		}
2191 		tmr = &net->rxt_timer;
2192 		break;
2193 	case SCTP_TIMER_TYPE_INIT:
2194 		if ((stcb == NULL) || (net == NULL)) {
2195 			return;
2196 		}
2197 		tmr = &net->rxt_timer;
2198 		break;
2199 	case SCTP_TIMER_TYPE_RECV:
2200 		if (stcb == NULL) {
2201 			return;
2202 		}
2203 		tmr = &stcb->asoc.dack_timer;
2204 		break;
2205 	case SCTP_TIMER_TYPE_SHUTDOWN:
2206 		if ((stcb == NULL) || (net == NULL)) {
2207 			return;
2208 		}
2209 		tmr = &net->rxt_timer;
2210 		break;
2211 	case SCTP_TIMER_TYPE_HEARTBEAT:
2212 		if ((stcb == NULL) || (net == NULL)) {
2213 			return;
2214 		}
2215 		tmr = &net->hb_timer;
2216 		break;
2217 	case SCTP_TIMER_TYPE_COOKIE:
2218 		if ((stcb == NULL) || (net == NULL)) {
2219 			return;
2220 		}
2221 		tmr = &net->rxt_timer;
2222 		break;
2223 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2224 		/* nothing needed but the endpoint here */
2225 		tmr = &inp->sctp_ep.signature_change;
2226 		/*
2227 		 * We re-use the newcookie timer for the INP kill timer. We
2228 		 * must assure that we do not kill it by accident.
2229 		 */
2230 		break;
2231 	case SCTP_TIMER_TYPE_ASOCKILL:
2232 		/*
2233 		 * Stop the asoc kill timer.
2234 		 */
2235 		if (stcb == NULL) {
2236 			return;
2237 		}
2238 		tmr = &stcb->asoc.strreset_timer;
2239 		break;
2240 
2241 	case SCTP_TIMER_TYPE_INPKILL:
2242 		/*
2243 		 * The inp is setup to die. We re-use the signature_chage
2244 		 * timer since that has stopped and we are in the GONE
2245 		 * state.
2246 		 */
2247 		tmr = &inp->sctp_ep.signature_change;
2248 		break;
2249 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2250 		if ((stcb == NULL) || (net == NULL)) {
2251 			return;
2252 		}
2253 		tmr = &net->pmtu_timer;
2254 		break;
2255 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2256 		if ((stcb == NULL) || (net == NULL)) {
2257 			return;
2258 		}
2259 		tmr = &net->rxt_timer;
2260 		break;
2261 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2262 		if (stcb == NULL) {
2263 			return;
2264 		}
2265 		tmr = &stcb->asoc.shut_guard_timer;
2266 		break;
2267 	case SCTP_TIMER_TYPE_STRRESET:
2268 		if (stcb == NULL) {
2269 			return;
2270 		}
2271 		tmr = &stcb->asoc.strreset_timer;
2272 		break;
2273 	case SCTP_TIMER_TYPE_ASCONF:
2274 		if (stcb == NULL) {
2275 			return;
2276 		}
2277 		tmr = &stcb->asoc.asconf_timer;
2278 		break;
2279 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2280 		if (stcb == NULL) {
2281 			return;
2282 		}
2283 		tmr = &stcb->asoc.delete_prim_timer;
2284 		break;
2285 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2286 		if (stcb == NULL) {
2287 			return;
2288 		}
2289 		tmr = &stcb->asoc.autoclose_timer;
2290 		break;
2291 	default:
2292 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2293 		    __FUNCTION__, t_type);
2294 		break;
2295 	};
2296 	if (tmr == NULL) {
2297 		return;
2298 	}
2299 	if ((tmr->type != t_type) && tmr->type) {
2300 		/*
2301 		 * Ok we have a timer that is under joint use. Cookie timer
2302 		 * per chance with the SEND timer. We therefore are NOT
2303 		 * running the timer that the caller wants stopped.  So just
2304 		 * return.
2305 		 */
2306 		return;
2307 	}
2308 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2309 		stcb->asoc.num_send_timers_up--;
2310 		if (stcb->asoc.num_send_timers_up < 0) {
2311 			stcb->asoc.num_send_timers_up = 0;
2312 		}
2313 	}
2314 	tmr->self = NULL;
2315 	tmr->stopped_from = from;
2316 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2317 	return;
2318 }
2319 
2320 uint32_t
2321 sctp_calculate_len(struct mbuf *m)
2322 {
2323 	uint32_t tlen = 0;
2324 	struct mbuf *at;
2325 
2326 	at = m;
2327 	while (at) {
2328 		tlen += SCTP_BUF_LEN(at);
2329 		at = SCTP_BUF_NEXT(at);
2330 	}
2331 	return (tlen);
2332 }
2333 
2334 void
2335 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2336     struct sctp_association *asoc, uint32_t mtu)
2337 {
2338 	/*
2339 	 * Reset the P-MTU size on this association, this involves changing
2340 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2341 	 * allow the DF flag to be cleared.
2342 	 */
2343 	struct sctp_tmit_chunk *chk;
2344 	unsigned int eff_mtu, ovh;
2345 
2346 	asoc->smallest_mtu = mtu;
2347 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2348 		ovh = SCTP_MIN_OVERHEAD;
2349 	} else {
2350 		ovh = SCTP_MIN_V4_OVERHEAD;
2351 	}
2352 	eff_mtu = mtu - ovh;
2353 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2354 		if (chk->send_size > eff_mtu) {
2355 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2356 		}
2357 	}
2358 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2359 		if (chk->send_size > eff_mtu) {
2360 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2361 		}
2362 	}
2363 }
2364 
2365 
2366 /*
2367  * given an association and starting time of the current RTT period return
2368  * RTO in number of msecs net should point to the current network
2369  */
2370 
2371 uint32_t
2372 sctp_calculate_rto(struct sctp_tcb *stcb,
2373     struct sctp_association *asoc,
2374     struct sctp_nets *net,
2375     struct timeval *told,
2376     int safe, int rtt_from_sack)
2377 {
2378 	/*-
2379 	 * given an association and the starting time of the current RTT
2380 	 * period (in value1/value2) return RTO in number of msecs.
2381 	 */
2382 	int32_t rtt;		/* RTT in ms */
2383 	uint32_t new_rto;
2384 	int first_measure = 0;
2385 	struct timeval now, then, *old;
2386 
2387 	/* Copy it out for sparc64 */
2388 	if (safe == sctp_align_unsafe_makecopy) {
2389 		old = &then;
2390 		memcpy(&then, told, sizeof(struct timeval));
2391 	} else if (safe == sctp_align_safe_nocopy) {
2392 		old = told;
2393 	} else {
2394 		/* error */
2395 		SCTP_PRINTF("Huh, bad rto calc call\n");
2396 		return (0);
2397 	}
2398 	/************************/
2399 	/* 1. calculate new RTT */
2400 	/************************/
2401 	/* get the current time */
2402 	if (stcb->asoc.use_precise_time) {
2403 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2404 	} else {
2405 		(void)SCTP_GETTIME_TIMEVAL(&now);
2406 	}
2407 	timevalsub(&now, old);
2408 	/* store the current RTT in us */
2409 	net->rtt = (uint64_t) 10000000 *(uint64_t) now.tv_sec +
2410 	         (uint64_t) now.tv_usec;
2411 
2412 	/* computer rtt in ms */
2413 	rtt = net->rtt / 1000;
2414 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2415 		/*
2416 		 * Tell the CC module that a new update has just occurred
2417 		 * from a sack
2418 		 */
2419 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2420 	}
2421 	/*
2422 	 * Do we need to determine the lan? We do this only on sacks i.e.
2423 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2424 	 */
2425 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2426 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2427 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2428 			net->lan_type = SCTP_LAN_INTERNET;
2429 		} else {
2430 			net->lan_type = SCTP_LAN_LOCAL;
2431 		}
2432 	}
2433 	/***************************/
2434 	/* 2. update RTTVAR & SRTT */
2435 	/***************************/
2436 	/*-
2437 	 * Compute the scaled average lastsa and the
2438 	 * scaled variance lastsv as described in van Jacobson
2439 	 * Paper "Congestion Avoidance and Control", Annex A.
2440 	 *
2441 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2442 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2443 	 */
2444 	if (net->RTO_measured) {
2445 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2446 		net->lastsa += rtt;
2447 		if (rtt < 0) {
2448 			rtt = -rtt;
2449 		}
2450 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2451 		net->lastsv += rtt;
2452 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2453 			rto_logging(net, SCTP_LOG_RTTVAR);
2454 		}
2455 	} else {
2456 		/* First RTO measurment */
2457 		net->RTO_measured = 1;
2458 		first_measure = 1;
2459 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2460 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2461 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2462 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2463 		}
2464 	}
2465 	if (net->lastsv == 0) {
2466 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2467 	}
2468 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2469 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2470 	    (stcb->asoc.sat_network_lockout == 0)) {
2471 		stcb->asoc.sat_network = 1;
2472 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2473 		stcb->asoc.sat_network = 0;
2474 		stcb->asoc.sat_network_lockout = 1;
2475 	}
2476 	/* bound it, per C6/C7 in Section 5.3.1 */
2477 	if (new_rto < stcb->asoc.minrto) {
2478 		new_rto = stcb->asoc.minrto;
2479 	}
2480 	if (new_rto > stcb->asoc.maxrto) {
2481 		new_rto = stcb->asoc.maxrto;
2482 	}
2483 	/* we are now returning the RTO */
2484 	return (new_rto);
2485 }
2486 
2487 /*
2488  * return a pointer to a contiguous piece of data from the given mbuf chain
2489  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2490  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2491  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2492  */
2493 caddr_t
2494 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2495 {
2496 	uint32_t count;
2497 	uint8_t *ptr;
2498 
2499 	ptr = in_ptr;
2500 	if ((off < 0) || (len <= 0))
2501 		return (NULL);
2502 
2503 	/* find the desired start location */
2504 	while ((m != NULL) && (off > 0)) {
2505 		if (off < SCTP_BUF_LEN(m))
2506 			break;
2507 		off -= SCTP_BUF_LEN(m);
2508 		m = SCTP_BUF_NEXT(m);
2509 	}
2510 	if (m == NULL)
2511 		return (NULL);
2512 
2513 	/* is the current mbuf large enough (eg. contiguous)? */
2514 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2515 		return (mtod(m, caddr_t)+off);
2516 	} else {
2517 		/* else, it spans more than one mbuf, so save a temp copy... */
2518 		while ((m != NULL) && (len > 0)) {
2519 			count = min(SCTP_BUF_LEN(m) - off, len);
2520 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2521 			len -= count;
2522 			ptr += count;
2523 			off = 0;
2524 			m = SCTP_BUF_NEXT(m);
2525 		}
2526 		if ((m == NULL) && (len > 0))
2527 			return (NULL);
2528 		else
2529 			return ((caddr_t)in_ptr);
2530 	}
2531 }
2532 
2533 
2534 
2535 struct sctp_paramhdr *
2536 sctp_get_next_param(struct mbuf *m,
2537     int offset,
2538     struct sctp_paramhdr *pull,
2539     int pull_limit)
2540 {
2541 	/* This just provides a typed signature to Peter's Pull routine */
2542 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2543 	    (uint8_t *) pull));
2544 }
2545 
2546 
2547 int
2548 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2549 {
2550 	/*
2551 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2552 	 * padlen is > 3 this routine will fail.
2553 	 */
2554 	uint8_t *dp;
2555 	int i;
2556 
2557 	if (padlen > 3) {
2558 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2559 		return (ENOBUFS);
2560 	}
2561 	if (padlen <= M_TRAILINGSPACE(m)) {
2562 		/*
2563 		 * The easy way. We hope the majority of the time we hit
2564 		 * here :)
2565 		 */
2566 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2567 		SCTP_BUF_LEN(m) += padlen;
2568 	} else {
2569 		/* Hard way we must grow the mbuf */
2570 		struct mbuf *tmp;
2571 
2572 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2573 		if (tmp == NULL) {
2574 			/* Out of space GAK! we are in big trouble. */
2575 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2576 			return (ENOSPC);
2577 		}
2578 		/* setup and insert in middle */
2579 		SCTP_BUF_LEN(tmp) = padlen;
2580 		SCTP_BUF_NEXT(tmp) = NULL;
2581 		SCTP_BUF_NEXT(m) = tmp;
2582 		dp = mtod(tmp, uint8_t *);
2583 	}
2584 	/* zero out the pad */
2585 	for (i = 0; i < padlen; i++) {
2586 		*dp = 0;
2587 		dp++;
2588 	}
2589 	return (0);
2590 }
2591 
2592 int
2593 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2594 {
2595 	/* find the last mbuf in chain and pad it */
2596 	struct mbuf *m_at;
2597 
2598 	m_at = m;
2599 	if (last_mbuf) {
2600 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2601 	} else {
2602 		while (m_at) {
2603 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2604 				return (sctp_add_pad_tombuf(m_at, padval));
2605 			}
2606 			m_at = SCTP_BUF_NEXT(m_at);
2607 		}
2608 	}
2609 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2610 	return (EFAULT);
2611 }
2612 
/*
 * Queue an SCTP_ASSOC_CHANGE notification ('event', e.g. SCTP_COMM_UP
 * or SCTP_COMM_LOST, with 'error' as sac_error) on the socket's read
 * queue.  For TCP-model or one-to-one-pooled sockets, COMM_LOST and
 * CANT_STR_ASSOC additionally set so_error (ECONNREFUSED while still
 * in COOKIE_WAIT, ECONNRESET otherwise) and wake all sleepers.
 * 'data' is not used by this function.  'so_locked' tells the
 * platform locking code whether the socket lock is already held.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Drop the TCB lock while acquiring the socket lock
			 * (lock ordering); a refcount keeps the assoc alive
			 * across the gap.  Bail if the socket closed while
			 * the lock was dropped.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		socantrcvmore(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Fill in the sctp_assoc_change notification body. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* Wrap the mbuf in a read-queue entry and deliver it. */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Same lock-ordering dance as above. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2730 
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for peer address 'sa'
 * (spc_state = 'state', spc_error = 'error') on the socket's read
 * queue.  Does nothing when the RECVPADDREVNT feature is off.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address; IPv6 link-local scope is fixed up for userland. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
2807 
2808 
/*
 * Queue an SCTP_SEND_FAILED notification for a message that was
 * already chunked ('chk').  'error' (SCTP_NOTIFY_DATAGRAM_UNSENT vs
 * sent) selects ssf_flags.  The chunk's data mbufs are stolen
 * (chk->data is cleared) and appended after the notification header,
 * with the SCTP DATA chunk header trimmed off first.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* ssf_length = header + user data (chunk size minus DATA header). */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2890 
2891 
/*
 * Queue an SCTP_SEND_FAILED notification for a message still on the
 * stream queue ('sp'), i.e. not yet chunked — so unlike
 * sctp_notify_send_failed() there is no DATA chunk header to trim.
 * The pending data mbufs are stolen (sp->data cleared) and appended
 * to the notification.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* ssf_length = notification header + pending user data. */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* part of the message was already chunked and taken */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
2963 }
2964 
2965 
2966 
2967 static void
2968 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
2969     uint32_t error)
2970 {
2971 	struct mbuf *m_notify;
2972 	struct sctp_adaptation_event *sai;
2973 	struct sctp_queued_to_read *control;
2974 
2975 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
2976 		/* event not enabled */
2977 		return;
2978 	}
2979 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
2980 	if (m_notify == NULL)
2981 		/* no space left */
2982 		return;
2983 	SCTP_BUF_LEN(m_notify) = 0;
2984 	sai = mtod(m_notify, struct sctp_adaptation_event *);
2985 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
2986 	sai->sai_flags = 0;
2987 	sai->sai_length = sizeof(struct sctp_adaptation_event);
2988 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
2989 	sai->sai_assoc_id = sctp_get_associd(stcb);
2990 
2991 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
2992 	SCTP_BUF_NEXT(m_notify) = NULL;
2993 
2994 	/* append to socket */
2995 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2996 	    0, 0, 0, 0, 0, 0,
2997 	    m_notify);
2998 	if (control == NULL) {
2999 		/* no memory */
3000 		sctp_m_freem(m_notify);
3001 		return;
3002 	}
3003 	control->length = SCTP_BUF_LEN(m_notify);
3004 	control->spec_flags = M_NOTIFICATION;
3005 	/* not that we need this */
3006 	control->tail_mbuf = m_notify;
3007 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3008 	    control,
3009 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3010 }
3011 
3012 /* This always must be called with the read-queue LOCKED in the INP */
3013 static void
3014 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3015     uint32_t val, int so_locked
3016 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3017     SCTP_UNUSED
3018 #endif
3019 )
3020 {
3021 	struct mbuf *m_notify;
3022 	struct sctp_pdapi_event *pdapi;
3023 	struct sctp_queued_to_read *control;
3024 	struct sockbuf *sb;
3025 
3026 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3027 		/* event not enabled */
3028 		return;
3029 	}
3030 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3031 		return;
3032 	}
3033 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3034 	if (m_notify == NULL)
3035 		/* no space left */
3036 		return;
3037 	SCTP_BUF_LEN(m_notify) = 0;
3038 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3039 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3040 	pdapi->pdapi_flags = 0;
3041 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3042 	pdapi->pdapi_indication = error;
3043 	pdapi->pdapi_stream = (val >> 16);
3044 	pdapi->pdapi_seq = (val & 0x0000ffff);
3045 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3046 
3047 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3048 	SCTP_BUF_NEXT(m_notify) = NULL;
3049 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3050 	    0, 0, 0, 0, 0, 0,
3051 	    m_notify);
3052 	if (control == NULL) {
3053 		/* no memory */
3054 		sctp_m_freem(m_notify);
3055 		return;
3056 	}
3057 	control->spec_flags = M_NOTIFICATION;
3058 	control->length = SCTP_BUF_LEN(m_notify);
3059 	/* not that we need this */
3060 	control->tail_mbuf = m_notify;
3061 	control->held_length = 0;
3062 	control->length = 0;
3063 	sb = &stcb->sctp_socket->so_rcv;
3064 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3065 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3066 	}
3067 	sctp_sballoc(stcb, sb, m_notify);
3068 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3069 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3070 	}
3071 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3072 	control->end_added = 1;
3073 	if (stcb->asoc.control_pdapi)
3074 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3075 	else {
3076 		/* we really should not see this case */
3077 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3078 	}
3079 	if (stcb->sctp_ep && stcb->sctp_socket) {
3080 		/* This should always be the case */
3081 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3082 		struct socket *so;
3083 
3084 		so = SCTP_INP_SO(stcb->sctp_ep);
3085 		if (!so_locked) {
3086 			atomic_add_int(&stcb->asoc.refcnt, 1);
3087 			SCTP_TCB_UNLOCK(stcb);
3088 			SCTP_SOCKET_LOCK(so, 1);
3089 			SCTP_TCB_LOCK(stcb);
3090 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3091 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3092 				SCTP_SOCKET_UNLOCK(so, 1);
3093 				return;
3094 			}
3095 		}
3096 #endif
3097 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3098 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3099 		if (!so_locked) {
3100 			SCTP_SOCKET_UNLOCK(so, 1);
3101 		}
3102 #endif
3103 	}
3104 }
3105 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification.  For one-to-one style
 * sockets (and one-to-many sockets in the TCP pool) the socket is also
 * marked so no further sends are possible.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Take the socket lock before socantsendmore(); drop the
		 * TCB lock first to respect lock ordering and hold a
		 * refcount so the TCB cannot go away meanwhile.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3173 
3174 static void
3175 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3176     int so_locked
3177 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3178     SCTP_UNUSED
3179 #endif
3180 )
3181 {
3182 	struct mbuf *m_notify;
3183 	struct sctp_sender_dry_event *event;
3184 	struct sctp_queued_to_read *control;
3185 
3186 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3187 		/* event not enabled */
3188 		return;
3189 	}
3190 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3191 	if (m_notify == NULL) {
3192 		/* no space left */
3193 		return;
3194 	}
3195 	SCTP_BUF_LEN(m_notify) = 0;
3196 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3197 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3198 	event->sender_dry_flags = 0;
3199 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3200 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3201 
3202 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3203 	SCTP_BUF_NEXT(m_notify) = NULL;
3204 
3205 	/* append to socket */
3206 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3207 	    0, 0, 0, 0, 0, 0, m_notify);
3208 	if (control == NULL) {
3209 		/* no memory */
3210 		sctp_m_freem(m_notify);
3211 		return;
3212 	}
3213 	control->length = SCTP_BUF_LEN(m_notify);
3214 	control->spec_flags = M_NOTIFICATION;
3215 	/* not that we need this */
3216 	control->tail_mbuf = m_notify;
3217 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3218 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3219 }
3220 
3221 
3222 static void
3223 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3224 {
3225 	struct mbuf *m_notify;
3226 	struct sctp_queued_to_read *control;
3227 	struct sctp_stream_reset_event *strreset;
3228 	int len;
3229 
3230 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3231 		/* event not enabled */
3232 		return;
3233 	}
3234 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3235 	if (m_notify == NULL)
3236 		/* no space left */
3237 		return;
3238 	SCTP_BUF_LEN(m_notify) = 0;
3239 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3240 	if (len > M_TRAILINGSPACE(m_notify)) {
3241 		/* never enough room */
3242 		sctp_m_freem(m_notify);
3243 		return;
3244 	}
3245 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3246 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3247 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3248 	strreset->strreset_length = len;
3249 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3250 	strreset->strreset_list[0] = number_entries;
3251 
3252 	SCTP_BUF_LEN(m_notify) = len;
3253 	SCTP_BUF_NEXT(m_notify) = NULL;
3254 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3255 		/* no space */
3256 		sctp_m_freem(m_notify);
3257 		return;
3258 	}
3259 	/* append to socket */
3260 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3261 	    0, 0, 0, 0, 0, 0,
3262 	    m_notify);
3263 	if (control == NULL) {
3264 		/* no memory */
3265 		sctp_m_freem(m_notify);
3266 		return;
3267 	}
3268 	control->spec_flags = M_NOTIFICATION;
3269 	control->length = SCTP_BUF_LEN(m_notify);
3270 	/* not that we need this */
3271 	control->tail_mbuf = m_notify;
3272 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3273 	    control,
3274 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3275 }
3276 
3277 
3278 static void
3279 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3280     int number_entries, uint16_t * list, int flag)
3281 {
3282 	struct mbuf *m_notify;
3283 	struct sctp_queued_to_read *control;
3284 	struct sctp_stream_reset_event *strreset;
3285 	int len;
3286 
3287 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3288 		/* event not enabled */
3289 		return;
3290 	}
3291 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3292 	if (m_notify == NULL)
3293 		/* no space left */
3294 		return;
3295 	SCTP_BUF_LEN(m_notify) = 0;
3296 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3297 	if (len > M_TRAILINGSPACE(m_notify)) {
3298 		/* never enough room */
3299 		sctp_m_freem(m_notify);
3300 		return;
3301 	}
3302 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3303 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3304 	if (number_entries == 0) {
3305 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3306 	} else {
3307 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3308 	}
3309 	strreset->strreset_length = len;
3310 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3311 	if (number_entries) {
3312 		int i;
3313 
3314 		for (i = 0; i < number_entries; i++) {
3315 			strreset->strreset_list[i] = ntohs(list[i]);
3316 		}
3317 	}
3318 	SCTP_BUF_LEN(m_notify) = len;
3319 	SCTP_BUF_NEXT(m_notify) = NULL;
3320 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3321 		/* no space */
3322 		sctp_m_freem(m_notify);
3323 		return;
3324 	}
3325 	/* append to socket */
3326 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3327 	    0, 0, 0, 0, 0, 0,
3328 	    m_notify);
3329 	if (control == NULL) {
3330 		/* no memory */
3331 		sctp_m_freem(m_notify);
3332 		return;
3333 	}
3334 	control->spec_flags = M_NOTIFICATION;
3335 	control->length = SCTP_BUF_LEN(m_notify);
3336 	/* not that we need this */
3337 	control->tail_mbuf = m_notify;
3338 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3339 	    control,
3340 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3341 }
3342 
3343 
3344 void
3345 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3346     uint32_t error, void *data, int so_locked
3347 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3348     SCTP_UNUSED
3349 #endif
3350 )
3351 {
3352 	if ((stcb == NULL) ||
3353 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3354 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3355 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3356 		/* If the socket is gone we are out of here */
3357 		return;
3358 	}
3359 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3360 		return;
3361 	}
3362 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3363 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3364 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3365 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3366 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3367 			/* Don't report these in front states */
3368 			return;
3369 		}
3370 	}
3371 	switch (notification) {
3372 	case SCTP_NOTIFY_ASSOC_UP:
3373 		if (stcb->asoc.assoc_up_sent == 0) {
3374 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3375 			stcb->asoc.assoc_up_sent = 1;
3376 		}
3377 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3378 			sctp_notify_adaptation_layer(stcb, error);
3379 		}
3380 		if (stcb->asoc.peer_supports_auth == 0) {
3381 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3382 			    NULL, so_locked);
3383 		}
3384 		break;
3385 	case SCTP_NOTIFY_ASSOC_DOWN:
3386 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3387 		break;
3388 	case SCTP_NOTIFY_INTERFACE_DOWN:
3389 		{
3390 			struct sctp_nets *net;
3391 
3392 			net = (struct sctp_nets *)data;
3393 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3394 			    (struct sockaddr *)&net->ro._l_addr, error);
3395 			break;
3396 		}
3397 	case SCTP_NOTIFY_INTERFACE_UP:
3398 		{
3399 			struct sctp_nets *net;
3400 
3401 			net = (struct sctp_nets *)data;
3402 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3403 			    (struct sockaddr *)&net->ro._l_addr, error);
3404 			break;
3405 		}
3406 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3407 		{
3408 			struct sctp_nets *net;
3409 
3410 			net = (struct sctp_nets *)data;
3411 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3412 			    (struct sockaddr *)&net->ro._l_addr, error);
3413 			break;
3414 		}
3415 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3416 		sctp_notify_send_failed2(stcb, error,
3417 		    (struct sctp_stream_queue_pending *)data, so_locked);
3418 		break;
3419 	case SCTP_NOTIFY_DG_FAIL:
3420 		sctp_notify_send_failed(stcb, error,
3421 		    (struct sctp_tmit_chunk *)data, so_locked);
3422 		break;
3423 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3424 		{
3425 			uint32_t val;
3426 
3427 			val = *((uint32_t *) data);
3428 
3429 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3430 			break;
3431 		}
3432 	case SCTP_NOTIFY_STRDATA_ERR:
3433 		break;
3434 	case SCTP_NOTIFY_ASSOC_ABORTED:
3435 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3436 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3437 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3438 		} else {
3439 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3440 		}
3441 		break;
3442 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3443 		break;
3444 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3445 		break;
3446 	case SCTP_NOTIFY_ASSOC_RESTART:
3447 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3448 		if (stcb->asoc.peer_supports_auth == 0) {
3449 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3450 			    NULL, so_locked);
3451 		}
3452 		break;
3453 	case SCTP_NOTIFY_HB_RESP:
3454 		break;
3455 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3456 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3457 		break;
3458 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3459 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3460 		break;
3461 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3462 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3463 		break;
3464 
3465 	case SCTP_NOTIFY_STR_RESET_SEND:
3466 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3467 		break;
3468 	case SCTP_NOTIFY_STR_RESET_RECV:
3469 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3470 		break;
3471 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3472 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3473 		break;
3474 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3475 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3476 		break;
3477 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3478 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3479 		    error);
3480 		break;
3481 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3482 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3483 		    error);
3484 		break;
3485 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3486 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3487 		    error);
3488 		break;
3489 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3490 		break;
3491 	case SCTP_NOTIFY_ASCONF_FAILED:
3492 		break;
3493 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3494 		sctp_notify_shutdown_event(stcb);
3495 		break;
3496 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3497 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3498 		    (uint16_t) (uintptr_t) data,
3499 		    so_locked);
3500 		break;
3501 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3502 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3503 		    (uint16_t) (uintptr_t) data,
3504 		    so_locked);
3505 		break;
3506 	case SCTP_NOTIFY_NO_PEER_AUTH:
3507 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3508 		    (uint16_t) (uintptr_t) data,
3509 		    so_locked);
3510 		break;
3511 	case SCTP_NOTIFY_SENDER_DRY:
3512 		sctp_notify_sender_dry_event(stcb, so_locked);
3513 		break;
3514 	default:
3515 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3516 		    __FUNCTION__, notification, notification);
3517 		break;
3518 	}			/* end switch */
3519 }
3520 
/*
 * Flush every outbound queue of the association (sent queue, send
 * queue, and each stream's pending queue), notifying the ULP of each
 * failed datagram and freeing its resources.  'holds_lock' is non-zero
 * when the caller already holds the TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
			/* the notify may have consumed the data mbuf */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
			/* the notify may have consumed the data mbuf */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
				/* the notify may have consumed the data mbuf */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3614 
3615 void
3616 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3617 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3618     SCTP_UNUSED
3619 #endif
3620 )
3621 {
3622 	if (stcb == NULL) {
3623 		return;
3624 	}
3625 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3626 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3627 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3628 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3629 	}
3630 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3631 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3632 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3633 		return;
3634 	}
3635 	/* Tell them we lost the asoc */
3636 	sctp_report_all_outbound(stcb, 1, so_locked);
3637 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3638 }
3639 
/*
 * Abort an (optionally existing) association in response to an inbound
 * packet: notify the ULP if we have a TCB, send an ABORT to the peer,
 * and free the association.  'm'/'iphlen'/'sh' describe the packet that
 * triggered the abort.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Acquire the socket lock before freeing the assoc; drop
		 * the TCB lock first (lock ordering) while holding a ref
		 * so the TCB cannot disappear underneath us.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3683 
3684 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the per-association in/out TSN history logs.
 * NOTE(review): the guard macro is spelled NOSIY_PRINTS (not NOISY_) —
 * presumably a long-standing typo; the body is compiled out unless that
 * exact name is defined, so the function is normally a no-op.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* when wrapped, print the older half of the ring buffer first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* same ring-buffer walk for the outbound log */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3745 
3746 #endif
3747 
/*
 * Abort an association from our side: notify the ULP, send an ABORT
 * chunk to the peer (with 'op_err' as cause), update statistics, and
 * free the association.  With a NULL stcb this can only finish tearing
 * down an inp whose socket is already gone.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last assoc gone, finish freeing the inp */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * sctp_free_assoc() needs the socket lock; drop the TCB lock
	 * first (lock ordering) while holding a refcount on the assoc.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3810 
3811 void
3812 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3813     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3814 {
3815 	struct sctp_chunkhdr *ch, chunk_buf;
3816 	unsigned int chk_length;
3817 
3818 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3819 	/* Generate a TO address for future reference */
3820 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3821 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3822 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3823 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3824 		}
3825 	}
3826 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3827 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3828 	while (ch != NULL) {
3829 		chk_length = ntohs(ch->chunk_length);
3830 		if (chk_length < sizeof(*ch)) {
3831 			/* break to abort land */
3832 			break;
3833 		}
3834 		switch (ch->chunk_type) {
3835 		case SCTP_COOKIE_ECHO:
3836 			/* We hit here only if the assoc is being freed */
3837 			return;
3838 		case SCTP_PACKET_DROPPED:
3839 			/* we don't respond to pkt-dropped */
3840 			return;
3841 		case SCTP_ABORT_ASSOCIATION:
3842 			/* we don't respond with an ABORT to an ABORT */
3843 			return;
3844 		case SCTP_SHUTDOWN_COMPLETE:
3845 			/*
3846 			 * we ignore it since we are not waiting for it and
3847 			 * peer is gone
3848 			 */
3849 			return;
3850 		case SCTP_SHUTDOWN_ACK:
3851 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
3852 			return;
3853 		default:
3854 			break;
3855 		}
3856 		offset += SCTP_SIZE32(chk_length);
3857 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3858 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3859 	}
3860 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
3861 }
3862 
3863 /*
3864  * check the inbound datagram to make sure there is not an abort inside it,
3865  * if there is return 1, else return 0.
3866  */
3867 int
3868 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3869 {
3870 	struct sctp_chunkhdr *ch;
3871 	struct sctp_init_chunk *init_chk, chunk_buf;
3872 	int offset;
3873 	unsigned int chk_length;
3874 
3875 	offset = iphlen + sizeof(struct sctphdr);
3876 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3877 	    (uint8_t *) & chunk_buf);
3878 	while (ch != NULL) {
3879 		chk_length = ntohs(ch->chunk_length);
3880 		if (chk_length < sizeof(*ch)) {
3881 			/* packet is probably corrupt */
3882 			break;
3883 		}
3884 		/* we seem to be ok, is it an abort? */
3885 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
3886 			/* yep, tell them */
3887 			return (1);
3888 		}
3889 		if (ch->chunk_type == SCTP_INITIATION) {
3890 			/* need to update the Vtag */
3891 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
3892 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
3893 			if (init_chk != NULL) {
3894 				*vtagfill = ntohl(init_chk->init.initiate_tag);
3895 			}
3896 		}
3897 		/* Nope, move to the next chunk */
3898 		offset += SCTP_SIZE32(chk_length);
3899 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3900 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3901 	}
3902 	return (0);
3903 }
3904 
3905 /*
3906  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
3907  * set (i.e. it's 0) so, create this function to compare link local scopes
3908  */
3909 #ifdef INET6
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	/*
	 * Compare the link-local scopes of two IPv6 addresses, recovering
	 * an embedded scope id when sin6_scope_id is unset.  Returns 1 if
	 * the scopes match, 0 otherwise (including when recovery fails).
	 */
	struct sockaddr_in6 tmp1, tmp2;

	/* work on copies so recovery never mutates the callers' addresses */
	tmp1 = *addr1;
	tmp2 = *addr2;

	if ((tmp1.sin6_scope_id == 0) && sa6_recoverscope(&tmp1)) {
		/* can't get scope, so can't match */
		return (0);
	}
	if ((tmp2.sin6_scope_id == 0) && sa6_recoverscope(&tmp2)) {
		/* can't get scope, so can't match */
		return (0);
	}
	return ((tmp1.sin6_scope_id == tmp2.sin6_scope_id) ? 1 : 0);
}
3934 
3935 /*
3936  * returns a sockaddr_in6 with embedded scope recovered and removed
3937  */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/*
	 * Check and strip embedded scope junk.  If the scope id can be
	 * recovered, a copy with the scope set is placed in *store and
	 * returned; otherwise the (possibly scope-cleared) original is
	 * returned unchanged.
	 */
	if ((addr->sin6_family != AF_INET6) ||
	    (!IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr))) {
		/* not a scoped link-local address; nothing to do */
		return (addr);
	}
	if (addr->sin6_scope_id == 0) {
		*store = *addr;
		if (sa6_recoverscope(store) == 0) {
			/* use the recovered scope */
			addr = store;
		}
	} else {
		/* else, return the original "to" addr with junk stripped */
		in6_clearscope(&addr->sin6_addr);
	}
	return (addr);
}
3958 
3959 #endif
3960 
3961 /*
3962  * are the two addresses the same?  currently a "scopeless" check returns: 1
3963  * if same, 0 if not
3964  */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{
	/*
	 * "Scopeless" address equality check.  Returns 1 if the two
	 * addresses are the same, 0 if not (or if either pointer is NULL,
	 * the families differ, or the family is unsupported).
	 */

	/* both must be valid and of the same family */
	if ((sa1 == NULL) || (sa2 == NULL) ||
	    (sa1->sa_family != sa2->sa_family)) {
		return (0);
	}
	switch (sa1->sa_family) {
#ifdef INET6
	case AF_INET6:
		/* IPv6 addresses: scope-insensitive comparison */
		return (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)sa1,
		    (struct sockaddr_in6 *)sa2));
#endif
#ifdef INET
	case AF_INET:
		/* IPv4 addresses: compare the 32-bit values directly */
		return (((struct sockaddr_in *)sa1)->sin_addr.s_addr ==
		    ((struct sockaddr_in *)sa2)->sin_addr.s_addr);
#endif
	default:
		/* we don't do these... */
		return (0);
	}
}
4006 
4007 void
4008 sctp_print_address(struct sockaddr *sa)
4009 {
4010 #ifdef INET6
4011 	char ip6buf[INET6_ADDRSTRLEN];
4012 
4013 	ip6buf[0] = 0;
4014 #endif
4015 
4016 	switch (sa->sa_family) {
4017 #ifdef INET6
4018 	case AF_INET6:
4019 		{
4020 			struct sockaddr_in6 *sin6;
4021 
4022 			sin6 = (struct sockaddr_in6 *)sa;
4023 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4024 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4025 			    ntohs(sin6->sin6_port),
4026 			    sin6->sin6_scope_id);
4027 			break;
4028 		}
4029 #endif
4030 #ifdef INET
4031 	case AF_INET:
4032 		{
4033 			struct sockaddr_in *sin;
4034 			unsigned char *p;
4035 
4036 			sin = (struct sockaddr_in *)sa;
4037 			p = (unsigned char *)&sin->sin_addr;
4038 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4039 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4040 			break;
4041 		}
4042 #endif
4043 	default:
4044 		SCTP_PRINTF("?\n");
4045 		break;
4046 	}
4047 }
4048 
4049 void
4050 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4051 {
4052 	switch (iph->ip_v) {
4053 #ifdef INET
4054 	case IPVERSION:
4055 		{
4056 			struct sockaddr_in lsa, fsa;
4057 
4058 			bzero(&lsa, sizeof(lsa));
4059 			lsa.sin_len = sizeof(lsa);
4060 			lsa.sin_family = AF_INET;
4061 			lsa.sin_addr = iph->ip_src;
4062 			lsa.sin_port = sh->src_port;
4063 			bzero(&fsa, sizeof(fsa));
4064 			fsa.sin_len = sizeof(fsa);
4065 			fsa.sin_family = AF_INET;
4066 			fsa.sin_addr = iph->ip_dst;
4067 			fsa.sin_port = sh->dest_port;
4068 			SCTP_PRINTF("src: ");
4069 			sctp_print_address((struct sockaddr *)&lsa);
4070 			SCTP_PRINTF("dest: ");
4071 			sctp_print_address((struct sockaddr *)&fsa);
4072 			break;
4073 		}
4074 #endif
4075 #ifdef INET6
4076 	case IPV6_VERSION >> 4:
4077 		{
4078 			struct ip6_hdr *ip6;
4079 			struct sockaddr_in6 lsa6, fsa6;
4080 
4081 			ip6 = (struct ip6_hdr *)iph;
4082 			bzero(&lsa6, sizeof(lsa6));
4083 			lsa6.sin6_len = sizeof(lsa6);
4084 			lsa6.sin6_family = AF_INET6;
4085 			lsa6.sin6_addr = ip6->ip6_src;
4086 			lsa6.sin6_port = sh->src_port;
4087 			bzero(&fsa6, sizeof(fsa6));
4088 			fsa6.sin6_len = sizeof(fsa6);
4089 			fsa6.sin6_family = AF_INET6;
4090 			fsa6.sin6_addr = ip6->ip6_dst;
4091 			fsa6.sin6_port = sh->dest_port;
4092 			SCTP_PRINTF("src: ");
4093 			sctp_print_address((struct sockaddr *)&lsa6);
4094 			SCTP_PRINTF("dest: ");
4095 			sctp_print_address((struct sockaddr *)&fsa6);
4096 			break;
4097 		}
4098 #endif
4099 	default:
4100 		/* TSNH */
4101 		break;
4102 	}
4103 }
4104 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.  Used when a
	 * peeloff/accept migrates an association to a new socket: queued
	 * read data must follow it, and the socket-buffer accounting has
	 * to be moved from the old so_rcv to the new one.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* keep concurrent readers off the old receive buffer while we work */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* uncharge every mbuf from the old receive buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge the same mbufs to the new receive buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4180 
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 *
	 * Note: this function takes ownership of 'control' — on the
	 * can't-read and all-collapsed paths the control (and any data)
	 * is freed here instead of being queued.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader side is gone; discard control and its data */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* only real data (not notifications) counts as a receive */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/*
	 * Walk the chain: prune zero-length mbufs in place and charge the
	 * remaining ones to the socket buffer, accumulating the byte
	 * count into control->length.
	 */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* wake any reader now that the data is visible on the queue */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Lock-order dance: drop the TCB lock while
				 * acquiring the socket lock, holding a
				 * refcount so the assoc can't be freed
				 * underneath us.
				 */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4302 
4303 
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 *
	 * Returns 0 on success, -1 if the control is missing, already
	 * complete, or m is empty.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* shared error exit: drop the read lock and bail */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* reader side is gone; silently drop the append */
		SCTP_INP_READ_UNLOCK(inp);
		return 0;
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/*
	 * Prune zero-length mbufs, total up the payload bytes, and (in
	 * the PDAPI case, sb != NULL) charge each mbuf to the socket
	 * buffer.
	 */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake the reader now that the appended data is visible */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/* refcount protects the assoc across the lock dance */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4449 
4450 
4451 
4452 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4453  *************ALTERNATE ROUTING CODE
4454  */
4455 
4456 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4457  *************ALTERNATE ROUTING CODE
4458  */
4459 
4460 struct mbuf *
4461 sctp_generate_invmanparam(int err)
4462 {
4463 	/* Return a MBUF with a invalid mandatory parameter */
4464 	struct mbuf *m;
4465 
4466 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4467 	if (m) {
4468 		struct sctp_paramhdr *ph;
4469 
4470 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4471 		ph = mtod(m, struct sctp_paramhdr *);
4472 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4473 		ph->param_type = htons(err);
4474 	}
4475 	return (m);
4476 }
4477 
4478 #ifdef SCTP_MBCNT_LOGGING
4479 void
4480 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4481     struct sctp_tmit_chunk *tp1, int chk_cnt)
4482 {
4483 	if (tp1->data == NULL) {
4484 		return;
4485 	}
4486 	asoc->chunks_on_out_queue -= chk_cnt;
4487 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4488 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4489 		    asoc->total_output_queue_size,
4490 		    tp1->book_size,
4491 		    0,
4492 		    tp1->mbcnt);
4493 	}
4494 	if (asoc->total_output_queue_size >= tp1->book_size) {
4495 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4496 	} else {
4497 		asoc->total_output_queue_size = 0;
4498 	}
4499 
4500 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4501 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4502 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4503 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4504 		} else {
4505 			stcb->sctp_socket->so_snd.sb_cc = 0;
4506 
4507 		}
4508 	}
4509 }
4510 
4511 #endif
4512 
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    int reason, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Abandon a (possibly multi-fragment) PR-SCTP message starting at
	 * tp1: notify the ULP of the failure, free the data, and mark each
	 * piece SCTP_FORWARD_TSN_SKIP so a FORWARD-TSN can cover it.  The
	 * message may span the sent queue, the send queue, and the stream
	 * out queue; all three are scanned until the LAST fragment (eom)
	 * is accounted for.  Returns the total book size released.
	 */
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint16_t stream = 0, seq = 0;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	stream = tp1->rec.data.stream_number;
	seq = tp1->rec.data.stream_seq;
	/* Pass 1: walk forward from tp1 over this message's fragments. */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			/* give the peer back the rwnd this chunk held */
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.  Pass 2: continue through the send queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.stream_number != stream) ||
			    (tp1->rec.data.stream_seq != seq)) {
				break;
			}
			/*
			 * save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one we
			 * don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/*
			 * on to the sent queue so we can wait for it to be
			 * passed by.
			 */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there is stuff left on the
		 * stream out queue.. yuck.  Pass 3: mark the pending stream
		 * entry so the rest of the message is discarded as it
		 * arrives from the app.
		 */
		strq = &stcb->asoc.strmout[stream];
		SCTP_TCB_SEND_LOCK(stcb);
		TAILQ_FOREACH(sp, &strq->outqueue, next) {
			/* FIXME: Shouldn't this be a serial number check? */
			if (sp->strseq > seq) {
				break;
			}
			/* Check if its our SEQ */
			if (sp->strseq == seq) {
				sp->discard_rest = 1;
				/*
				 * We may need to put a chunk on the queue
				 * that holds the TSN that would have been
				 * sent with the LAST bit.
				 */
				if (chk == NULL) {
					/* Yep, we have to */
					sctp_alloc_a_chunk(stcb, chk);
					if (chk == NULL) {
						/*
						 * we are hosed. All we can
						 * do is nothing.. which
						 * will cause an abort if
						 * the peer is paying
						 * attention.
						 */
						goto oh_well;
					}
					memset(chk, 0, sizeof(*chk));
					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
					chk->sent = SCTP_FORWARD_TSN_SKIP;
					chk->asoc = &stcb->asoc;
					chk->rec.data.stream_seq = sp->strseq;
					chk->rec.data.stream_number = sp->stream;
					chk->rec.data.payloadtype = sp->ppid;
					chk->rec.data.context = sp->context;
					chk->flags = sp->act_flags;
					if (sp->net)
						chk->whoTo = sp->net;
					else
						chk->whoTo = stcb->asoc.primary_destination;
					atomic_add_int(&chk->whoTo->ref_count, 1);
					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
					stcb->asoc.pr_sctp_cnt++;
					chk->pr_sctp_on = 1;
					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
					stcb->asoc.sent_queue_cnt++;
					stcb->asoc.pr_sctp_cnt++;
				} else {
					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
				}
		oh_well:
				if (sp->data) {
					/*
					 * Pull any data to free up the SB
					 * and allow sender to "add more"
					 * while we will throw away :-)
					 */
					sctp_free_spbufspace(stcb, &stcb->asoc,
					    sp);
					ret_sz += sp->length;
					do_wakeup_routine = 1;
					sp->some_taken = 1;
					sctp_m_freem(sp->data);
					sp->length = 0;
					sp->data = NULL;
					sp->tail_mbuf = NULL;
				}
				break;
			}
		}		/* End tailq_foreach */
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* refcount protects the assoc across the lock dance */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}
4717 
4718 /*
4719  * checks to see if the given address, sa, is one that is currently known by
4720  * the kernel note: can't distinguish the same address on multiple interfaces
4721  * and doesn't handle multiple addresses with different zone/scope id's note:
4722  * ifa_ifwithaddr() compares the entire sockaddr struct
4723  */
4724 struct sctp_ifa *
4725 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4726     int holds_lock)
4727 {
4728 	struct sctp_laddr *laddr;
4729 
4730 	if (holds_lock == 0) {
4731 		SCTP_INP_RLOCK(inp);
4732 	}
4733 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4734 		if (laddr->ifa == NULL)
4735 			continue;
4736 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4737 			continue;
4738 #ifdef INET
4739 		if (addr->sa_family == AF_INET) {
4740 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4741 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4742 				/* found him. */
4743 				if (holds_lock == 0) {
4744 					SCTP_INP_RUNLOCK(inp);
4745 				}
4746 				return (laddr->ifa);
4747 				break;
4748 			}
4749 		}
4750 #endif
4751 #ifdef INET6
4752 		if (addr->sa_family == AF_INET6) {
4753 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4754 			    &laddr->ifa->address.sin6)) {
4755 				/* found him. */
4756 				if (holds_lock == 0) {
4757 					SCTP_INP_RUNLOCK(inp);
4758 				}
4759 				return (laddr->ifa);
4760 				break;
4761 			}
4762 		}
4763 #endif
4764 	}
4765 	if (holds_lock == 0) {
4766 		SCTP_INP_RUNLOCK(inp);
4767 	}
4768 	return (NULL);
4769 }
4770 
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	/*
	 * Compute the hash-bucket value for an interface address.  IPv4
	 * folds the 32-bit address with its upper halfword; IPv6 sums the
	 * four 32-bit words and folds the same way.  Unsupported families
	 * hash to 0.
	 *
	 * Bug fix: the IPv6 case label read 'case INET6:' — the kernel
	 * option macro — instead of the address-family constant
	 * AF_INET6, so IPv6 sockaddrs never matched and always hashed to
	 * 0, defeating the vrf_addr_hash distribution for IPv6.
	 */
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t hash_of_addr;

			sin6 = (struct sockaddr_in6 *)addr;
			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3]);
			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
			return (hash_of_addr);
		}
#endif
	default:
		break;
	}
	return (0);
}
4804 
4805 struct sctp_ifa *
4806 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4807 {
4808 	struct sctp_ifa *sctp_ifap;
4809 	struct sctp_vrf *vrf;
4810 	struct sctp_ifalist *hash_head;
4811 	uint32_t hash_of_addr;
4812 
4813 	if (holds_lock == 0)
4814 		SCTP_IPI_ADDR_RLOCK();
4815 
4816 	vrf = sctp_find_vrf(vrf_id);
4817 	if (vrf == NULL) {
4818 stage_right:
4819 		if (holds_lock == 0)
4820 			SCTP_IPI_ADDR_RUNLOCK();
4821 		return (NULL);
4822 	}
4823 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4824 
4825 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4826 	if (hash_head == NULL) {
4827 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4828 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4829 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4830 		sctp_print_address(addr);
4831 		SCTP_PRINTF("No such bucket for address\n");
4832 		if (holds_lock == 0)
4833 			SCTP_IPI_ADDR_RUNLOCK();
4834 
4835 		return (NULL);
4836 	}
4837 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4838 		if (sctp_ifap == NULL) {
4839 #ifdef INVARIANTS
4840 			panic("Huh LIST_FOREACH corrupt");
4841 			goto stage_right;
4842 #else
4843 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4844 			goto stage_right;
4845 #endif
4846 		}
4847 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4848 			continue;
4849 #ifdef INET
4850 		if (addr->sa_family == AF_INET) {
4851 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4852 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4853 				/* found him. */
4854 				if (holds_lock == 0)
4855 					SCTP_IPI_ADDR_RUNLOCK();
4856 				return (sctp_ifap);
4857 				break;
4858 			}
4859 		}
4860 #endif
4861 #ifdef INET6
4862 		if (addr->sa_family == AF_INET6) {
4863 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4864 			    &sctp_ifap->address.sin6)) {
4865 				/* found him. */
4866 				if (holds_lock == 0)
4867 					SCTP_IPI_ADDR_RUNLOCK();
4868 				return (sctp_ifap);
4869 				break;
4870 			}
4871 		}
4872 #endif
4873 	}
4874 	if (holds_lock == 0)
4875 		SCTP_IPI_ADDR_RUNLOCK();
4876 	return (NULL);
4877 }
4878 
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/*
	 * User pulled some data, do we need a rwnd update?  If the freed
	 * bytes would grow our advertised window by at least rwnd_req,
	 * send a window-update SACK now; otherwise just remember how much
	 * has been freed since the last report.
	 */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* hold a ref so the assoc can't be freed while we work */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window grew enough to be worth advertising */
		if (hold_rlock) {
			/* drop the read lock before taking the TCB lock */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* reacquire the read lock if we dropped it for the caller */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
4960 
/*
 * sctp_sorecvmsg() is SCTP's receive-path workhorse.  It pulls queued
 * message data off the endpoint's read_queue and hands it to the caller
 * either by copying through 'uio' (when mp == NULL) or by returning the
 * raw mbuf chain in '*mp'.  'from'/'fromlen' optionally receive the peer
 * address, 'msg_flags' carries MSG_* flags both in (MSG_PEEK,
 * MSG_DONTWAIT, ...) and out (MSG_EOR, MSG_NOTIFICATION, MSG_TRUNC), and
 * when 'filling_sinfo' is non-zero 'sinfo' is filled with per-message
 * receive information.  Returns 0 on success or an errno value.
 *
 * Locking discipline (NOTE(review): order of lock operations below is
 * deliberate -- do not reorder): three local flags track what is held at
 * any point: 'sockbuf_lock' (sblock() on so_rcv, serializes readers),
 * 'hold_sblock' (SOCKBUF_LOCK on so_rcv) and 'hold_rlock' (the inp
 * read-queue mutex).  All exit paths funnel through the
 * release/release_unlocked/out labels, which drop whatever is still held.
 * When data is consumed (not MSG_PEEK), the receive window that has been
 * opened is tracked in 'freed_so_far' and reported to the peer via
 * sctp_user_rcvd() once it reaches 'rwnd_req'.
 */
int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
	 * On the way out we may send out any combination of:
	 * MSG_NOTIFICATION MSG_EOR
	 *
	 */
	struct sctp_inpcb *inp = NULL;
	int my_len = 0;
	int cp_len = 0, error = 0;
	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
	struct mbuf *m = NULL;
	struct sctp_tcb *stcb = NULL;
	int wakeup_read_socket = 0;
	int freecnt_applied = 0;	/* did we bump stcb->asoc.refcnt? */
	int out_flags = 0, in_flags = 0;
	int block_allowed = 1;
	uint32_t freed_so_far = 0;	/* rwnd opened, not yet told to peer */
	uint32_t copied_so_far = 0;	/* bytes handed to the caller */
	int in_eeor_mode = 0;
	int no_rcv_needed = 0;
	uint32_t rwnd_req = 0;		/* threshold for window-update SACK */
	int hold_sblock = 0;		/* SOCKBUF_LOCK(&so->so_rcv) held? */
	int hold_rlock = 0;		/* SCTP_INP_READ_LOCK(inp) held? */
	int slen = 0;
	uint32_t held_length = 0;
	int sockbuf_lock = 0;		/* sblock() on so_rcv taken? */

	if (uio == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if (msg_flags) {
		in_flags = *msg_flags;
		if (in_flags & MSG_PEEK)
			SCTP_STAT_INCR(sctps_read_peeks);
	} else {
		in_flags = 0;
	}
	slen = uio->uio_resid;

	/* Pull in and set up our int flags */
	if (in_flags & MSG_OOB) {
		/* Out of band's NOT supported */
		return (EOPNOTSUPP);
	}
	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
		/* Peeking is only supported with the uio copy path. */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((in_flags & (MSG_DONTWAIT
	    | MSG_NBIO
	    )) ||
	    SCTP_SO_IS_NBIO(so)) {
		block_allowed = 0;
	}
	/* setup the endpoint */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
		return (EFAULT);
	}
	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
	/* Must be at least a MTU's worth */
	if (rwnd_req < SCTP_MIN_RWND)
		rwnd_req = SCTP_MIN_RWND;
	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTER,
		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTERPL,
		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
	}
	/* Serialize against other readers of this socket buffer. */
	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
	sockbuf_lock = 1;
	if (error) {
		goto release_unlocked;
	}
restart:


restart_nosblocks:
	/* (Re)take the sockbuf lock and re-validate socket state. */
	if (hold_sblock == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		hold_sblock = 1;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto out;
		} else {
			if (so->so_rcv.sb_cc == 0) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
				/* indicate EOF */
				error = 0;
				goto out;
			}
		}
	}
	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
		/* we need to wait for data */
		if ((so->so_rcv.sb_cc == 0) &&
		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/*
				 * For active open side clear flags for
				 * re-use passive open is blocked by
				 * connect.
				 */
				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
					/*
					 * You were aborted, passive side
					 * always hits here
					 */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
					error = ECONNRESET;
				}
				so->so_state &= ~(SS_ISCONNECTING |
				    SS_ISDISCONNECTING |
				    SS_ISCONFIRMING |
				    SS_ISCONNECTED);
				if (error == 0) {
					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
						error = ENOTCONN;
					}
				}
				goto out;
			}
		}
		error = sbwait(&so->so_rcv);
		if (error) {
			goto out;
		}
		held_length = 0;
		goto restart_nosblocks;
	} else if (so->so_rcv.sb_cc == 0) {
		/* Non-blocking and nothing queued. */
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
		} else {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
					/*
					 * For active open side clear flags
					 * for re-use passive open is
					 * blocked by connect.
					 */
					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
						/*
						 * You were aborted, passive
						 * side always hits here
						 */
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
						error = ECONNRESET;
					}
					so->so_state &= ~(SS_ISCONNECTING |
					    SS_ISDISCONNECTING |
					    SS_ISCONFIRMING |
					    SS_ISCONNECTED);
					if (error == 0) {
						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
							error = ENOTCONN;
						}
					}
					goto out;
				}
			}
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
			error = EWOULDBLOCK;
		}
		goto out;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* we possibly have data we can read */
	/* sa_ignore FREED_MEMORY */
	control = TAILQ_FIRST(&inp->read_queue);
	if (control == NULL) {
		/*
		 * This could be happening since the appender did the
		 * increment but as not yet did the tailq insert onto the
		 * read_queue
		 */
		if (hold_rlock == 0) {
			SCTP_INP_READ_LOCK(inp);
			hold_rlock = 1;
		}
		control = TAILQ_FIRST(&inp->read_queue);
		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
#ifdef INVARIANTS
			panic("Huh, its non zero and nothing on control?");
#endif
			so->so_rcv.sb_cc = 0;
		}
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->do_not_ref_stcb)) {
		/*
		 * Clean up code for freeing assoc that left behind a
		 * pdapi.. maybe a peer in EEOR that just closed after
		 * sending and never indicated a EOR.
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		control->held_length = 0;
		if (control->data) {
			/* Hmm there is data here .. fix */
			struct mbuf *m_tmp;
			int cnt = 0;

			m_tmp = control->data;
			while (m_tmp) {
				cnt += SCTP_BUF_LEN(m_tmp);
				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
					control->tail_mbuf = m_tmp;
					control->end_added = 1;
				}
				m_tmp = SCTP_BUF_NEXT(m_tmp);
			}
			control->length = cnt;
		} else {
			/* remove it */
			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hiddend data */
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->end_added == 1)) {
		/*
		 * Do we also need to check for (control->pdapi_aborted ==
		 * 1)?
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		TAILQ_REMOVE(&inp->read_queue, control, next);
		if (control->data) {
#ifdef INVARIANTS
			panic("control->data not null but control->length == 0");
#else
			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
			sctp_m_freem(control->data);
			control->data = NULL;
#endif
		}
		if (control->aux_data) {
			sctp_m_free(control->aux_data);
			control->aux_data = NULL;
		}
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if (control->length == 0) {
		/* Head message has no data yet (pd-api in progress). */
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
		    (filling_sinfo)) {
			/* find a more suitable one then this */
			ctl = TAILQ_NEXT(control, next);
			while (ctl) {
				if ((ctl->stcb != control->stcb) && (ctl->length) &&
				    (ctl->some_taken ||
				    (ctl->spec_flags & M_NOTIFICATION) ||
				    ((ctl->do_not_ref_stcb == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present. If we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery as started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered..
					 */
					control = ctl;
					goto found_one;
				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
					    (ctl->length) &&
					    ((ctl->some_taken) ||
					    ((ctl->do_not_ref_stcb == 0) &&
					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to tht tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notificaiton on the same assoc to
					 * be delivered.
					 */
					control = ctl;
					goto found_one;
				}
				ctl = TAILQ_NEXT(ctl, next);
			}
		}
		/*
		 * if we reach here, not suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into the our held count, and its time to sleep again.
		 */
		held_length = so->so_rcv.sb_cc;
		control->held_length = so->so_rcv.sb_cc;
		goto restart;
	}
	/* Clear the held length since there is something to read */
	control->held_length = 0;
	if (hold_rlock) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
found_one:
	/*
	 * If we reach here, control has a some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
	control->some_taken++;
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	stcb = control->stcb;
	if (stcb) {
		if ((control->do_not_ref_stcb == 0) &&
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			if (freecnt_applied == 0)
				stcb = NULL;
		} else if (control->do_not_ref_stcb == 0) {
			/* you can't free it on me please */
			/*
			 * The lock on the socket buffer protects us so the
			 * free code will stop. But since we used the
			 * socketbuf lock and the sender uses the tcb_lock
			 * to increment, we need to use the atomic add to
			 * the refcnt
			 */
			if (freecnt_applied) {
#ifdef INVARIANTS
				panic("refcnt already incremented");
#else
				printf("refcnt already incremented?\n");
#endif
			} else {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				freecnt_applied = 1;
			}
			/*
			 * Setup to remember how much we have not yet told
			 * the peer our rwnd has opened up. Note we grab the
			 * value from the tcb from last time. Note too that
			 * sack sending clears this when a sack is sent,
			 * which is fine. Once we hit the rwnd_req, we then
			 * will go to the sctp_user_rcvd() that will not
			 * lock until it KNOWs it MUST send a WUP-SACK.
			 */
			freed_so_far = stcb->freed_by_sorcv_sincelast;
			stcb->freed_by_sorcv_sincelast = 0;
		}
	}
	if (stcb &&
	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
	    control->do_not_ref_stcb == 0) {
		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
	}
	/* First lets get off the sinfo and sockaddr info */
	if ((sinfo) && filling_sinfo) {
		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
		nxt = TAILQ_NEXT(control, next);
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
			struct sctp_extrcvinfo *s_extra;

			s_extra = (struct sctp_extrcvinfo *)sinfo;
			if ((nxt) &&
			    (nxt->length)) {
				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
				if (nxt->sinfo_flags & SCTP_UNORDERED) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
				}
				if (nxt->spec_flags & M_NOTIFICATION) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
				}
				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
				s_extra->sreinfo_next_length = nxt->length;
				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
				if (nxt->tail_mbuf != NULL) {
					if (nxt->end_added) {
						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
					}
				}
			} else {
				/*
				 * we explicitly 0 this, since the memcpy
				 * got some other things beyond the older
				 * sinfo_ that is on the control's structure
				 * :-D
				 */
				nxt = NULL;
				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
				s_extra->sreinfo_next_aid = 0;
				s_extra->sreinfo_next_length = 0;
				s_extra->sreinfo_next_ppid = 0;
				s_extra->sreinfo_next_stream = 0;
			}
		}
		/*
		 * update off the real current cum-ack, if we have an stcb.
		 */
		if ((control->do_not_ref_stcb == 0) && stcb)
			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
		/*
		 * mask off the high bits, we keep the actual chunk bits in
		 * there.
		 */
		sinfo->sinfo_flags &= 0x00ff;
		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
			sinfo->sinfo_flags |= SCTP_UNORDERED;
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	{
		int index, newindex;
		struct sctp_pcbtsn_rlog *entry;

		do {
			index = inp->readlog_index;
			newindex = index + 1;
			if (newindex >= SCTP_READ_LOG_SIZE) {
				newindex = 0;
			}
		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
		entry = &inp->readlog[index];
		entry->vtag = control->sinfo_assoc_id;
		entry->strm = control->sinfo_stream;
		entry->seq = control->sinfo_ssn;
		entry->sz = control->length;
		entry->flgs = control->sinfo_flags;
	}
#endif
	if (fromlen && from) {
		/* Copy out the (possibly truncated) peer address. */
		struct sockaddr *to;

#ifdef INET
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in *)from)->sin_port = control->port_from;
#else
		/* No AF_INET use AF_INET6 */
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
#endif

		to = from;
#if defined(INET) && defined(INET6)
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
		    (to->sa_family == AF_INET) &&
		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
			struct sockaddr_in *sin;
			struct sockaddr_in6 sin6;

			sin = (struct sockaddr_in *)to;
			bzero(&sin6, sizeof(sin6));
			sin6.sin6_family = AF_INET6;
			sin6.sin6_len = sizeof(struct sockaddr_in6);
			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
			bcopy(&sin->sin_addr,
			    &sin6.sin6_addr.s6_addr32[3],
			    sizeof(sin6.sin6_addr.s6_addr32[3]));
			sin6.sin6_port = sin->sin_port;
			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
		}
#endif
#if defined(INET6)
		{
			struct sockaddr_in6 lsa6, *to6;

			to6 = (struct sockaddr_in6 *)to;
			sctp_recover_scope_mac(to6, (&lsa6));
		}
#endif
	}
	/* now copy out what data we can */
	if (mp == NULL) {
		/* copy out each mbuf in the chain up to length */
get_more_data:
		m = control->data;
		while (m) {
			/* Move out all we can */
			cp_len = (int)uio->uio_resid;
			my_len = (int)SCTP_BUF_LEN(m);
			if (cp_len > my_len) {
				/* not enough in this buf */
				cp_len = my_len;
			}
			if (hold_rlock) {
				/* drop the lock across the (sleepable) copyout */
				SCTP_INP_READ_UNLOCK(inp);
				hold_rlock = 0;
			}
			if (cp_len > 0)
				error = uiomove(mtod(m, char *), cp_len, uio);
			/* re-read */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				goto release;
			}
			if ((control->do_not_ref_stcb == 0) && stcb &&
			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				no_rcv_needed = 1;
			}
			if (error) {
				/* error we are out of here */
				goto release;
			}
			if ((SCTP_BUF_NEXT(m) == NULL) &&
			    (cp_len >= SCTP_BUF_LEN(m)) &&
			    ((control->end_added == 0) ||
			    (control->end_added &&
			    (TAILQ_NEXT(control, next) == NULL)))
			    ) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
			if (cp_len == SCTP_BUF_LEN(m)) {
				if ((SCTP_BUF_NEXT(m) == NULL) &&
				    (control->end_added)) {
					out_flags |= MSG_EOR;
					if ((control->do_not_ref_stcb == 0) &&
					    (control->stcb != NULL) &&
					    ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				/* we ate up the mbuf */
				if (in_flags & MSG_PEEK) {
					/* just looking */
					m = SCTP_BUF_NEXT(m);
					copied_so_far += cp_len;
				} else {
					/* dispose of the mbuf */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
					}
					sctp_sbfree(control, stcb, &so->so_rcv, m);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
					}
					copied_so_far += cp_len;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					atomic_subtract_int(&control->length, cp_len);
					control->data = sctp_m_free(m);
					m = control->data;
					/*
					 * been through it all, must hold sb
					 * lock ok to null tail
					 */
					if (control->data == NULL) {
#ifdef INVARIANTS
						if ((control->end_added == 0) ||
						    (TAILQ_NEXT(control, next) == NULL)) {
							/*
							 * If the end is not
							 * added, OR the
							 * next is NOT null
							 * we MUST have the
							 * lock.
							 */
							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
								panic("Hmm we don't own the lock?");
							}
						}
#endif
						control->tail_mbuf = NULL;
#ifdef INVARIANTS
						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
							panic("end_added, nothing left and no MSG_EOR");
						}
#endif
					}
				}
			} else {
				/* Do we need to trim the mbuf? */
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				if ((in_flags & MSG_PEEK) == 0) {
					SCTP_BUF_RESV_UF(m, cp_len);
					SCTP_BUF_LEN(m) -= cp_len;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
					}
					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
					if ((control->do_not_ref_stcb == 0) &&
					    stcb) {
						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
					}
					copied_so_far += cp_len;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
						    SCTP_LOG_SBRESULT, 0);
					}
					atomic_subtract_int(&control->length, cp_len);
				} else {
					copied_so_far += cp_len;
				}
			}
			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
				break;
			}
			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
			    (control->do_not_ref_stcb == 0) &&
			    (freed_so_far >= rwnd_req)) {
				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
			}
		}		/* end while(m) */
		/*
		 * At this point we have looked at it all and we either have
		 * a MSG_EOR/or read all the user wants... <OR>
		 * control->length == 0.
		 */
		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
			/* we are done with this control */
			if (control->length == 0) {
				if (control->data) {
#ifdef INVARIANTS
					panic("control->data not null at read eor?");
#else
					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
					sctp_m_freem(control->data);
					control->data = NULL;
#endif
				}
		done_with_control:
				if (TAILQ_NEXT(control, next) == NULL) {
					/*
					 * If we don't have a next we need a
					 * lock, if there is a next
					 * interrupt is filling ahead of us
					 * and we don't need a lock to
					 * remove this guy (which is the
					 * head of the queue).
					 */
					if (hold_rlock == 0) {
						SCTP_INP_READ_LOCK(inp);
						hold_rlock = 1;
					}
				}
				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hiddend data */
				if (control->held_length) {
					held_length = 0;
					control->held_length = 0;
					wakeup_read_socket = 1;
				}
				if (control->aux_data) {
					sctp_m_free(control->aux_data);
					control->aux_data = NULL;
				}
				no_rcv_needed = control->do_not_ref_stcb;
				sctp_free_remote_addr(control->whoFrom);
				control->data = NULL;
				sctp_free_a_readq(stcb, control);
				control = NULL;
				if ((freed_so_far >= rwnd_req) &&
				    (no_rcv_needed == 0))
					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);

			} else {
				/*
				 * The user did not read all of this
				 * message, turn off the returned MSG_EOR
				 * since we are leaving more behind on the
				 * control to read.
				 */
#ifdef INVARIANTS
				if (control->end_added &&
				    (control->data == NULL) &&
				    (control->tail_mbuf == NULL)) {
					panic("Gak, control->length is corrupt?");
				}
#endif
				no_rcv_needed = control->do_not_ref_stcb;
				out_flags &= ~MSG_EOR;
			}
		}
		if (out_flags & MSG_EOR) {
			goto release;
		}
		if ((uio->uio_resid == 0) ||
		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
		    ) {
			goto release;
		}
		/*
		 * If I hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions. Can we block? if not
		 * we are done. Did the user NOT set MSG_WAITALL?
		 */
		if (block_allowed == 0) {
			goto release;
		}
		/*
		 * We need to wait for more data a few things: - We don't
		 * sbunlock() so we don't get someone else reading. - We
		 * must be sure to account for the case where what is added
		 * is NOT to our control when we wakeup.
		 */

		/*
		 * Do we need to tell the transport a rwnd update might be
		 * needed before we go to sleep?
		 */
		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
		    ((freed_so_far >= rwnd_req) &&
		    (control->do_not_ref_stcb == 0) &&
		    (no_rcv_needed == 0))) {
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
		}
wait_some_more:
		/* Sleep until more of this partially-delivered message arrives. */
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			goto release;
		}
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
			goto release;

		if (hold_rlock == 1) {
			SCTP_INP_READ_UNLOCK(inp);
			hold_rlock = 0;
		}
		if (hold_sblock == 0) {
			SOCKBUF_LOCK(&so->so_rcv);
			hold_sblock = 1;
		}
		if ((copied_so_far) && (control->length == 0) &&
		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
			goto release;
		}
		if (so->so_rcv.sb_cc <= control->held_length) {
			error = sbwait(&so->so_rcv);
			if (error) {
				goto release;
			}
			control->held_length = 0;
		}
		if (hold_sblock) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			hold_sblock = 0;
		}
		if (control->length == 0) {
			/* still nothing here */
			if (control->end_added == 1) {
				/* he aborted, or is done i.e.did a shutdown */
				out_flags |= MSG_EOR;
				if (control->pdapi_aborted) {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;

					out_flags |= MSG_TRUNC;
				} else {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				goto done_with_control;
			}
			if (so->so_rcv.sb_cc > held_length) {
				control->held_length = so->so_rcv.sb_cc;
				held_length = 0;
			}
			goto wait_some_more;
		} else if (control->data == NULL) {
			/*
			 * we must re-sync since data is probably being
			 * added
			 */
			SCTP_INP_READ_LOCK(inp);
			if ((control->length > 0) && (control->data == NULL)) {
				/*
				 * big trouble.. we have the lock and its
				 * corrupt?
				 */
#ifdef INVARIANTS
				panic("Impossible data==NULL length !=0");
#endif
				out_flags |= MSG_EOR;
				out_flags |= MSG_TRUNC;
				control->length = 0;
				SCTP_INP_READ_UNLOCK(inp);
				goto done_with_control;
			}
			SCTP_INP_READ_UNLOCK(inp);
			/* We will fall around to get more data */
		}
		goto get_more_data;
	} else {
		/*-
		 * Give caller back the mbuf chain,
		 * store in uio_resid the length
		 */
		wakeup_read_socket = 0;
		if ((control->end_added == 0) ||
		    (TAILQ_NEXT(control, next) == NULL)) {
			/* Need to get rlock */
			if (hold_rlock == 0) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
		}
		if (control->end_added) {
			out_flags |= MSG_EOR;
			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
		}
		if (control->spec_flags & M_NOTIFICATION) {
			out_flags |= MSG_NOTIFICATION;
		}
		uio->uio_resid = control->length;
		*mp = control->data;
		m = control->data;
		while (m) {
			/* Account the whole chain out of the socket buffer. */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
			}
			sctp_sbfree(control, stcb, &so->so_rcv, m);
			freed_so_far += SCTP_BUF_LEN(m);
			freed_so_far += MSIZE;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control->data = control->tail_mbuf = NULL;
		control->length = 0;
		if (out_flags & MSG_EOR) {
			/* Done with this control */
			goto done_with_control;
		}
	}
release:
	/* Common exit with sblock held: drop the fine locks, then sbunlock. */
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	sbunlock(&so->so_rcv);
	sockbuf_lock = 0;

release_unlocked:
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
		if ((freed_so_far >= rwnd_req) &&
		    (control && (control->do_not_ref_stcb == 0)) &&
		    (no_rcv_needed == 0))
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
	}
out:
	if (msg_flags) {
		*msg_flags = out_flags;
	}
	if (((out_flags & MSG_EOR) == 0) &&
	    ((in_flags & MSG_PEEK) == 0) &&
	    (sinfo) &&
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
		struct sctp_extrcvinfo *s_extra;

		s_extra = (struct sctp_extrcvinfo *)sinfo;
		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
	}
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if (sockbuf_lock) {
		sbunlock(&so->so_rcv);
	}
	if (freecnt_applied) {
		/*
		 * The lock on the socket buffer protects us so the free
		 * code will stop. But since we used the socketbuf lock and
		 * the sender uses the tcb_lock to increment, we need to use
		 * the atomic add to the refcnt.
		 */
		if (stcb == NULL) {
#ifdef INVARIANTS
			panic("stcb for refcnt has gone NULL?");
			goto stage_left;
#else
			goto stage_left;
#endif
		}
		atomic_add_int(&stcb->asoc.refcnt, -1);
		freecnt_applied = 0;
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
stage_left:
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}
5937 
5938 
5939 #ifdef SCTP_MBUF_LOGGING
5940 struct mbuf *
5941 sctp_m_free(struct mbuf *m)
5942 {
5943 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5944 		if (SCTP_BUF_IS_EXTENDED(m)) {
5945 			sctp_log_mb(m, SCTP_MBUF_IFREE);
5946 		}
5947 	}
5948 	return (m_free(m));
5949 }
5950 
5951 void
5952 sctp_m_freem(struct mbuf *mb)
5953 {
5954 	while (mb != NULL)
5955 		mb = sctp_m_free(mb);
5956 }
5957 
5958 #endif
5959 
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 *
	 * The request is queued as a work item on the global address work
	 * queue and processed asynchronously by the iterator; this function
	 * only validates the address, enqueues the item, and starts the
	 * work-queue timer.  Returns 0, EADDRNOTAVAIL, or ENOMEM.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	/* The address must belong to one of our interfaces in this VRF. */
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/*
	 * Hold a reference on the ifa for the queued work item — presumably
	 * dropped by whoever consumes the item; verify against the work
	 * queue processing code.
	 */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	/* Kick the address work-queue timer so the item gets processed. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6006 
6007 
6008 int
6009 sctp_soreceive(struct socket *so,
6010     struct sockaddr **psa,
6011     struct uio *uio,
6012     struct mbuf **mp0,
6013     struct mbuf **controlp,
6014     int *flagsp)
6015 {
6016 	int error, fromlen;
6017 	uint8_t sockbuf[256];
6018 	struct sockaddr *from;
6019 	struct sctp_extrcvinfo sinfo;
6020 	int filling_sinfo = 1;
6021 	struct sctp_inpcb *inp;
6022 
6023 	inp = (struct sctp_inpcb *)so->so_pcb;
6024 	/* pickup the assoc we are reading from */
6025 	if (inp == NULL) {
6026 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6027 		return (EINVAL);
6028 	}
6029 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6030 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6031 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6032 	    (controlp == NULL)) {
6033 		/* user does not want the sndrcv ctl */
6034 		filling_sinfo = 0;
6035 	}
6036 	if (psa) {
6037 		from = (struct sockaddr *)sockbuf;
6038 		fromlen = sizeof(sockbuf);
6039 		from->sa_len = 0;
6040 	} else {
6041 		from = NULL;
6042 		fromlen = 0;
6043 	}
6044 
6045 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6046 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6047 	if ((controlp) && (filling_sinfo)) {
6048 		/* copy back the sinfo in a CMSG format */
6049 		if (filling_sinfo)
6050 			*controlp = sctp_build_ctl_nchunk(inp,
6051 			    (struct sctp_sndrcvinfo *)&sinfo);
6052 		else
6053 			*controlp = NULL;
6054 	}
6055 	if (psa) {
6056 		/* copy back the address info */
6057 		if (from && from->sa_len) {
6058 			*psa = sodupsockaddr(from, M_NOWAIT);
6059 		} else {
6060 			*psa = NULL;
6061 		}
6062 	}
6063 	return (error);
6064 }
6065 
6066 
6067 
6068 
6069 
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add the totaddr packed sockaddrs starting at addr to stcb as
	 * confirmed remote addresses (sctp_connectx() helper).  Returns
	 * the number of addresses actually added.  On an invalid address
	 * or allocation failure the association is freed, *error is set
	 * to EINVAL/ENOBUFS and the walk stops early.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast peers */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast peers */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown family leaves incr at its
			 * previous value (0 on the first iteration), so sa may
			 * not advance past the bad entry.  Presumably callers
			 * have already validated the families via
			 * sctp_connectx_helper_find() — verify.
			 */
			break;
		}
		/* step to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6145 
6146 struct sctp_tcb *
6147 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6148     int *totaddr, int *num_v4, int *num_v6, int *error,
6149     int limit, int *bad_addr)
6150 {
6151 	struct sockaddr *sa;
6152 	struct sctp_tcb *stcb = NULL;
6153 	size_t incr, at, i;
6154 
6155 	at = incr = 0;
6156 	sa = addr;
6157 
6158 	*error = *num_v6 = *num_v4 = 0;
6159 	/* account and validate addresses */
6160 	for (i = 0; i < (size_t)*totaddr; i++) {
6161 		switch (sa->sa_family) {
6162 #ifdef INET
6163 		case AF_INET:
6164 			(*num_v4) += 1;
6165 			incr = sizeof(struct sockaddr_in);
6166 			if (sa->sa_len != incr) {
6167 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6168 				*error = EINVAL;
6169 				*bad_addr = 1;
6170 				return (NULL);
6171 			}
6172 			break;
6173 #endif
6174 #ifdef INET6
6175 		case AF_INET6:
6176 			{
6177 				struct sockaddr_in6 *sin6;
6178 
6179 				sin6 = (struct sockaddr_in6 *)sa;
6180 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6181 					/* Must be non-mapped for connectx */
6182 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6183 					*error = EINVAL;
6184 					*bad_addr = 1;
6185 					return (NULL);
6186 				}
6187 				(*num_v6) += 1;
6188 				incr = sizeof(struct sockaddr_in6);
6189 				if (sa->sa_len != incr) {
6190 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6191 					*error = EINVAL;
6192 					*bad_addr = 1;
6193 					return (NULL);
6194 				}
6195 				break;
6196 			}
6197 #endif
6198 		default:
6199 			*totaddr = i;
6200 			/* we are done */
6201 			break;
6202 		}
6203 		if (i == (size_t)*totaddr) {
6204 			break;
6205 		}
6206 		SCTP_INP_INCR_REF(inp);
6207 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6208 		if (stcb != NULL) {
6209 			/* Already have or am bring up an association */
6210 			return (stcb);
6211 		} else {
6212 			SCTP_INP_DECR_REF(inp);
6213 		}
6214 		if ((at + incr) > (size_t)limit) {
6215 			*totaddr = i;
6216 			break;
6217 		}
6218 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6219 	}
6220 	return ((struct sctp_tcb *)NULL);
6221 }
6222 
6223 /*
6224  * sctp_bindx(ADD) for one address.
6225  * assumes all arguments are valid/checked by caller.
6226  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * Bind one additional address to an endpoint (sctp_bindx(ADD)).
	 * Validates the family, length, and port of sa against the
	 * endpoint's PCB flags, then either performs the initial bind (if
	 * the endpoint is still unbound) or adds the address via
	 * sctp_addr_mgmt_ep_sa().  Errors are reported through *error.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;	/* holds a v4-mapped address converted to v4 */

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* strip the mapping and continue with the plain v4 address */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not yet bound: this becomes the initial bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* check whether another endpoint already owns this address/port */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* free — clear the port and add it to this endpoint */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6350 
6351 /*
6352  * sctp_bindx(DELETE) for one address.
6353  * assumes all arguments are valid/checked by caller.
6354  */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * Remove one address from an endpoint (sctp_bindx(DELETE)).
	 * Performs the same family/length/PCB-flag validation as the ADD
	 * path, then delegates to sctp_addr_mgmt_ep_sa().  Errors are
	 * reported through *error.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;	/* holds a v4-mapped address converted to v4 */

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* strip the mapping and continue with the plain v4 address */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6435 
6436 /*
6437  * returns the valid local address count for an assoc, taking into account
6438  * all scoping rules
6439  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the valid local addresses for this association, honoring
	 * the association's scope settings (loopback, private-v4, link-
	 * and site-local v6) and skipping restricted addresses.  Returns
	 * the count; 0 when the VRF cannot be found.
	 */
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/* Decide which families this endpoint may use from its PCB flags. */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			/* a non-V6ONLY v6 socket may also use v4 addresses */
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6573 
6574 #if defined(SCTP_LOCAL_TRACE_BUF)
6575 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Append one entry to the global in-kernel trace ring buffer.
	 * A slot index is reserved lock-free: the CAS loop below bumps
	 * the shared index (wrapping at SCTP_MAX_LOGGING_SIZE) and retries
	 * until this thread's increment wins, so concurrent tracers each
	 * get a distinct slot.
	 */
	uint32_t saveindex, newindex;

	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* wrap the reserved slot back to the start of the ring */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6601 
6602 #endif
6603 /* XXX: Remove the #ifdef after tunneling over IPv6 works also on FreeBSD. */
6604 #ifdef INET
6605 /* We will need to add support
6606  * to bind the ports and such here
6607  * so we can do UDP tunneling. In
6608  * the mean-time, we return error
6609  */
6610 #include <netinet/udp.h>
6611 #include <netinet/udp_var.h>
6612 #include <sys/proc.h>
6613 #ifdef INET6
6614 #include <netinet6/sctp6_var.h>
6615 #endif
6616 
6617 static void
6618 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6619 {
6620 	struct ip *iph;
6621 	struct mbuf *sp, *last;
6622 	struct udphdr *uhdr;
6623 	uint16_t port = 0;
6624 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6625 
6626 	/*
6627 	 * Split out the mbuf chain. Leave the IP header in m, place the
6628 	 * rest in the sp.
6629 	 */
6630 	if ((m->m_flags & M_PKTHDR) == 0) {
6631 		/* Can't handle one that is not a pkt hdr */
6632 		goto out;
6633 	}
6634 	/* pull the src port */
6635 	iph = mtod(m, struct ip *);
6636 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6637 
6638 	port = uhdr->uh_sport;
6639 	sp = m_split(m, off, M_DONTWAIT);
6640 	if (sp == NULL) {
6641 		/* Gak, drop packet, we can't do a split */
6642 		goto out;
6643 	}
6644 	if (sp->m_pkthdr.len < header_size) {
6645 		/* Gak, packet can't have an SCTP header in it - to small */
6646 		m_freem(sp);
6647 		goto out;
6648 	}
6649 	/* ok now pull up the UDP header and SCTP header together */
6650 	sp = m_pullup(sp, header_size);
6651 	if (sp == NULL) {
6652 		/* Gak pullup failed */
6653 		goto out;
6654 	}
6655 	/* trim out the UDP header */
6656 	m_adj(sp, sizeof(struct udphdr));
6657 
6658 	/* Now reconstruct the mbuf chain */
6659 	/* 1) find last one */
6660 	last = m;
6661 	while (last->m_next != NULL) {
6662 		last = last->m_next;
6663 	}
6664 	last->m_next = sp;
6665 	m->m_pkthdr.len += sp->m_pkthdr.len;
6666 	last = m;
6667 	while (last != NULL) {
6668 		last = last->m_next;
6669 	}
6670 	/* Now its ready for sctp_input or sctp6_input */
6671 	iph = mtod(m, struct ip *);
6672 	switch (iph->ip_v) {
6673 #ifdef INET
6674 	case IPVERSION:
6675 		{
6676 			uint16_t len;
6677 
6678 			/* its IPv4 */
6679 			len = SCTP_GET_IPV4_LENGTH(iph);
6680 			len -= sizeof(struct udphdr);
6681 			SCTP_GET_IPV4_LENGTH(iph) = len;
6682 			sctp_input_with_port(m, off, port);
6683 			break;
6684 		}
6685 #endif
6686 #ifdef INET6
6687 	case IPV6_VERSION >> 4:
6688 		{
6689 			/* its IPv6 - NOT supported */
6690 			goto out;
6691 			break;
6692 
6693 		}
6694 #endif
6695 	default:
6696 		{
6697 			m_freem(m);
6698 			break;
6699 		}
6700 	}
6701 	return;
6702 out:
6703 	m_freem(m);
6704 }
6705 
6706 void
6707 sctp_over_udp_stop(void)
6708 {
6709 	struct socket *sop;
6710 
6711 	/*
6712 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6713 	 * for writting!
6714 	 */
6715 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6716 		/* Nothing to do */
6717 		return;
6718 	}
6719 	sop = SCTP_BASE_INFO(udp_tun_socket);
6720 	soclose(sop);
6721 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6722 }
6723 
6724 int
6725 sctp_over_udp_start(void)
6726 {
6727 	uint16_t port;
6728 	int ret;
6729 	struct sockaddr_in sin;
6730 	struct socket *sop = NULL;
6731 	struct thread *th;
6732 	struct ucred *cred;
6733 
6734 	/*
6735 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6736 	 * for writting!
6737 	 */
6738 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6739 	if (port == 0) {
6740 		/* Must have a port set */
6741 		return (EINVAL);
6742 	}
6743 	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
6744 		/* Already running -- must stop first */
6745 		return (EALREADY);
6746 	}
6747 	th = curthread;
6748 	cred = th->td_ucred;
6749 	if ((ret = socreate(PF_INET, &sop,
6750 	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
6751 		return (ret);
6752 	}
6753 	SCTP_BASE_INFO(udp_tun_socket) = sop;
6754 	/* call the special UDP hook */
6755 	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
6756 	if (ret) {
6757 		goto exit_stage_left;
6758 	}
6759 	/* Ok we have a socket, bind it to the port */
6760 	memset(&sin, 0, sizeof(sin));
6761 	sin.sin_len = sizeof(sin);
6762 	sin.sin_family = AF_INET;
6763 	sin.sin_port = htons(port);
6764 	ret = sobind(sop, (struct sockaddr *)&sin, th);
6765 	if (ret) {
6766 		/* Close up we cant get the port */
6767 exit_stage_left:
6768 		sctp_over_udp_stop();
6769 		return (ret);
6770 	}
6771 	/*
6772 	 * Ok we should now get UDP packets directly to our input routine
6773 	 * sctp_recv_upd_tunneled_packet().
6774 	 */
6775 	return (0);
6776 }
6777 
6778 #endif
6779