xref: /freebsd/sys/netinet/sctputil.c (revision 3e65b9c6e6b7b2081d54e1dc40983c3c00eaf738)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #endif
45 #include <netinet/sctp_header.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_uio.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_bsd_addr.h>
53 
54 
55 #ifndef KTR_SCTP
56 #define KTR_SCTP KTR_SUBSYS
57 #endif
58 
59 extern struct sctp_cc_functions sctp_cc_functions[];
60 extern struct sctp_ss_functions sctp_ss_functions[];
61 
62 void
63 sctp_sblog(struct sockbuf *sb,
64     struct sctp_tcb *stcb, int from, int incr)
65 {
66 	struct sctp_cwnd_log sctp_clog;
67 
68 	sctp_clog.x.sb.stcb = stcb;
69 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
70 	if (stcb)
71 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
72 	else
73 		sctp_clog.x.sb.stcb_sbcc = 0;
74 	sctp_clog.x.sb.incr = incr;
75 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
76 	    SCTP_LOG_EVENT_SB,
77 	    from,
78 	    sctp_clog.x.misc.log1,
79 	    sctp_clog.x.misc.log2,
80 	    sctp_clog.x.misc.log3,
81 	    sctp_clog.x.misc.log4);
82 }
83 
84 void
85 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
86 {
87 	struct sctp_cwnd_log sctp_clog;
88 
89 	sctp_clog.x.close.inp = (void *)inp;
90 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
91 	if (stcb) {
92 		sctp_clog.x.close.stcb = (void *)stcb;
93 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
94 	} else {
95 		sctp_clog.x.close.stcb = 0;
96 		sctp_clog.x.close.state = 0;
97 	}
98 	sctp_clog.x.close.loc = loc;
99 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
100 	    SCTP_LOG_EVENT_CLOSE,
101 	    0,
102 	    sctp_clog.x.misc.log1,
103 	    sctp_clog.x.misc.log2,
104 	    sctp_clog.x.misc.log3,
105 	    sctp_clog.x.misc.log4);
106 }
107 
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 
166 void
167 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
168 {
169 	struct sctp_cwnd_log sctp_clog;
170 
171 	sctp_clog.x.sack.cumack = cumack;
172 	sctp_clog.x.sack.oldcumack = old_cumack;
173 	sctp_clog.x.sack.tsn = tsn;
174 	sctp_clog.x.sack.numGaps = gaps;
175 	sctp_clog.x.sack.numDups = dups;
176 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
177 	    SCTP_LOG_EVENT_SACK,
178 	    from,
179 	    sctp_clog.x.misc.log1,
180 	    sctp_clog.x.misc.log2,
181 	    sctp_clog.x.misc.log3,
182 	    sctp_clog.x.misc.log4);
183 }
184 
185 void
186 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
187 {
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	memset(&sctp_clog, 0, sizeof(sctp_clog));
191 	sctp_clog.x.map.base = map;
192 	sctp_clog.x.map.cum = cum;
193 	sctp_clog.x.map.high = high;
194 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
195 	    SCTP_LOG_EVENT_MAP,
196 	    from,
197 	    sctp_clog.x.misc.log1,
198 	    sctp_clog.x.misc.log2,
199 	    sctp_clog.x.misc.log3,
200 	    sctp_clog.x.misc.log4);
201 }
202 
203 void
204 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
205     int from)
206 {
207 	struct sctp_cwnd_log sctp_clog;
208 
209 	memset(&sctp_clog, 0, sizeof(sctp_clog));
210 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
211 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
212 	sctp_clog.x.fr.tsn = tsn;
213 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
214 	    SCTP_LOG_EVENT_FR,
215 	    from,
216 	    sctp_clog.x.misc.log1,
217 	    sctp_clog.x.misc.log2,
218 	    sctp_clog.x.misc.log3,
219 	    sctp_clog.x.misc.log4);
220 }
221 
222 
223 void
224 sctp_log_mb(struct mbuf *m, int from)
225 {
226 	struct sctp_cwnd_log sctp_clog;
227 
228 	sctp_clog.x.mb.mp = m;
229 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
230 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
231 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
232 	if (SCTP_BUF_IS_EXTENDED(m)) {
233 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
234 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
235 	} else {
236 		sctp_clog.x.mb.ext = 0;
237 		sctp_clog.x.mb.refcnt = 0;
238 	}
239 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
240 	    SCTP_LOG_EVENT_MBUF,
241 	    from,
242 	    sctp_clog.x.misc.log1,
243 	    sctp_clog.x.misc.log2,
244 	    sctp_clog.x.misc.log3,
245 	    sctp_clog.x.misc.log4);
246 }
247 
248 
249 void
250 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
251     int from)
252 {
253 	struct sctp_cwnd_log sctp_clog;
254 
255 	if (control == NULL) {
256 		SCTP_PRINTF("Gak log of NULL?\n");
257 		return;
258 	}
259 	sctp_clog.x.strlog.stcb = control->stcb;
260 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
261 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
262 	sctp_clog.x.strlog.strm = control->sinfo_stream;
263 	if (poschk != NULL) {
264 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
265 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
266 	} else {
267 		sctp_clog.x.strlog.e_tsn = 0;
268 		sctp_clog.x.strlog.e_sseq = 0;
269 	}
270 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
271 	    SCTP_LOG_EVENT_STRM,
272 	    from,
273 	    sctp_clog.x.misc.log1,
274 	    sctp_clog.x.misc.log2,
275 	    sctp_clog.x.misc.log3,
276 	    sctp_clog.x.misc.log4);
277 }
278 
279 void
280 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
281 {
282 	struct sctp_cwnd_log sctp_clog;
283 
284 	sctp_clog.x.cwnd.net = net;
285 	if (stcb->asoc.send_queue_cnt > 255)
286 		sctp_clog.x.cwnd.cnt_in_send = 255;
287 	else
288 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
289 	if (stcb->asoc.stream_queue_cnt > 255)
290 		sctp_clog.x.cwnd.cnt_in_str = 255;
291 	else
292 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
293 
294 	if (net) {
295 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
296 		sctp_clog.x.cwnd.inflight = net->flight_size;
297 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
298 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
299 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
300 	}
301 	if (SCTP_CWNDLOG_PRESEND == from) {
302 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
303 	}
304 	sctp_clog.x.cwnd.cwnd_augment = augment;
305 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
306 	    SCTP_LOG_EVENT_CWND,
307 	    from,
308 	    sctp_clog.x.misc.log1,
309 	    sctp_clog.x.misc.log2,
310 	    sctp_clog.x.misc.log3,
311 	    sctp_clog.x.misc.log4);
312 }
313 
314 void
315 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
316 {
317 	struct sctp_cwnd_log sctp_clog;
318 
319 	memset(&sctp_clog, 0, sizeof(sctp_clog));
320 	if (inp) {
321 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
322 
323 	} else {
324 		sctp_clog.x.lock.sock = (void *)NULL;
325 	}
326 	sctp_clog.x.lock.inp = (void *)inp;
327 	if (stcb) {
328 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
329 	} else {
330 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
331 	}
332 	if (inp) {
333 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
334 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
335 	} else {
336 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
337 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
338 	}
339 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
340 	if (inp && (inp->sctp_socket)) {
341 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
342 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
343 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
344 	} else {
345 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
346 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
347 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
348 	}
349 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
350 	    SCTP_LOG_LOCK_EVENT,
351 	    from,
352 	    sctp_clog.x.misc.log1,
353 	    sctp_clog.x.misc.log2,
354 	    sctp_clog.x.misc.log3,
355 	    sctp_clog.x.misc.log4);
356 }
357 
358 void
359 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
360 {
361 	struct sctp_cwnd_log sctp_clog;
362 
363 	memset(&sctp_clog, 0, sizeof(sctp_clog));
364 	sctp_clog.x.cwnd.net = net;
365 	sctp_clog.x.cwnd.cwnd_new_value = error;
366 	sctp_clog.x.cwnd.inflight = net->flight_size;
367 	sctp_clog.x.cwnd.cwnd_augment = burst;
368 	if (stcb->asoc.send_queue_cnt > 255)
369 		sctp_clog.x.cwnd.cnt_in_send = 255;
370 	else
371 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
372 	if (stcb->asoc.stream_queue_cnt > 255)
373 		sctp_clog.x.cwnd.cnt_in_str = 255;
374 	else
375 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
376 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
377 	    SCTP_LOG_EVENT_MAXBURST,
378 	    from,
379 	    sctp_clog.x.misc.log1,
380 	    sctp_clog.x.misc.log2,
381 	    sctp_clog.x.misc.log3,
382 	    sctp_clog.x.misc.log4);
383 }
384 
385 void
386 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
387 {
388 	struct sctp_cwnd_log sctp_clog;
389 
390 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
391 	sctp_clog.x.rwnd.send_size = snd_size;
392 	sctp_clog.x.rwnd.overhead = overhead;
393 	sctp_clog.x.rwnd.new_rwnd = 0;
394 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
395 	    SCTP_LOG_EVENT_RWND,
396 	    from,
397 	    sctp_clog.x.misc.log1,
398 	    sctp_clog.x.misc.log2,
399 	    sctp_clog.x.misc.log3,
400 	    sctp_clog.x.misc.log4);
401 }
402 
403 void
404 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
405 {
406 	struct sctp_cwnd_log sctp_clog;
407 
408 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
409 	sctp_clog.x.rwnd.send_size = flight_size;
410 	sctp_clog.x.rwnd.overhead = overhead;
411 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
412 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
413 	    SCTP_LOG_EVENT_RWND,
414 	    from,
415 	    sctp_clog.x.misc.log1,
416 	    sctp_clog.x.misc.log2,
417 	    sctp_clog.x.misc.log3,
418 	    sctp_clog.x.misc.log4);
419 }
420 
421 void
422 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
423 {
424 	struct sctp_cwnd_log sctp_clog;
425 
426 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
427 	sctp_clog.x.mbcnt.size_change = book;
428 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
429 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
430 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
431 	    SCTP_LOG_EVENT_MBCNT,
432 	    from,
433 	    sctp_clog.x.misc.log1,
434 	    sctp_clog.x.misc.log2,
435 	    sctp_clog.x.misc.log3,
436 	    sctp_clog.x.misc.log4);
437 }
438 
/*
 * Log four arbitrary caller-supplied 32-bit values under
 * SCTP_LOG_MISC_EVENT; 'from' identifies the call site.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
447 
448 void
449 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
450 {
451 	struct sctp_cwnd_log sctp_clog;
452 
453 	sctp_clog.x.wake.stcb = (void *)stcb;
454 	sctp_clog.x.wake.wake_cnt = wake_cnt;
455 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
456 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
457 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
458 
459 	if (stcb->asoc.stream_queue_cnt < 0xff)
460 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
461 	else
462 		sctp_clog.x.wake.stream_qcnt = 0xff;
463 
464 	if (stcb->asoc.chunks_on_out_queue < 0xff)
465 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
466 	else
467 		sctp_clog.x.wake.chunks_on_oque = 0xff;
468 
469 	sctp_clog.x.wake.sctpflags = 0;
470 	/* set in the defered mode stuff */
471 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
472 		sctp_clog.x.wake.sctpflags |= 1;
473 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
474 		sctp_clog.x.wake.sctpflags |= 2;
475 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
476 		sctp_clog.x.wake.sctpflags |= 4;
477 	/* what about the sb */
478 	if (stcb->sctp_socket) {
479 		struct socket *so = stcb->sctp_socket;
480 
481 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
482 	} else {
483 		sctp_clog.x.wake.sbflags = 0xff;
484 	}
485 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
486 	    SCTP_LOG_EVENT_WAKE,
487 	    from,
488 	    sctp_clog.x.misc.log1,
489 	    sctp_clog.x.misc.log2,
490 	    sctp_clog.x.misc.log3,
491 	    sctp_clog.x.misc.log4);
492 }
493 
494 void
495 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
496 {
497 	struct sctp_cwnd_log sctp_clog;
498 
499 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
500 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
501 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
502 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
503 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
504 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
505 	sctp_clog.x.blk.sndlen = sendlen;
506 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
507 	    SCTP_LOG_EVENT_BLOCK,
508 	    from,
509 	    sctp_clog.x.misc.log1,
510 	    sctp_clog.x.misc.log2,
511 	    sctp_clog.x.misc.log3,
512 	    sctp_clog.x.misc.log4);
513 }
514 
/*
 * Stub for the statistics-log socket option: the log is retrieved via
 * ktrdump instead, so nothing is copied out.  Always returns 0.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
521 
522 #ifdef SCTP_AUDITING_ENABLED
523 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
524 static int sctp_audit_indx = 0;
525 
526 static
527 void
528 sctp_print_audit_report(void)
529 {
530 	int i;
531 	int cnt;
532 
533 	cnt = 0;
534 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
535 		if ((sctp_audit_data[i][0] == 0xe0) &&
536 		    (sctp_audit_data[i][1] == 0x01)) {
537 			cnt = 0;
538 			SCTP_PRINTF("\n");
539 		} else if (sctp_audit_data[i][0] == 0xf0) {
540 			cnt = 0;
541 			SCTP_PRINTF("\n");
542 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
543 		    (sctp_audit_data[i][1] == 0x01)) {
544 			SCTP_PRINTF("\n");
545 			cnt = 0;
546 		}
547 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
548 		    (uint32_t) sctp_audit_data[i][1]);
549 		cnt++;
550 		if ((cnt % 14) == 0)
551 			SCTP_PRINTF("\n");
552 	}
553 	for (i = 0; i < sctp_audit_indx; i++) {
554 		if ((sctp_audit_data[i][0] == 0xe0) &&
555 		    (sctp_audit_data[i][1] == 0x01)) {
556 			cnt = 0;
557 			SCTP_PRINTF("\n");
558 		} else if (sctp_audit_data[i][0] == 0xf0) {
559 			cnt = 0;
560 			SCTP_PRINTF("\n");
561 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
562 		    (sctp_audit_data[i][1] == 0x01)) {
563 			SCTP_PRINTF("\n");
564 			cnt = 0;
565 		}
566 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
567 		    (uint32_t) sctp_audit_data[i][1]);
568 		cnt++;
569 		if ((cnt % 14) == 0)
570 			SCTP_PRINTF("\n");
571 	}
572 	SCTP_PRINTF("\n");
573 }
574 
/*
 * Audit the association's retransmission and flight-size bookkeeping,
 * leaving a trail of marks in sctp_audit_data[].  Counters found to
 * disagree with the actual queues are corrected in place, and a full
 * audit report is printed if any discrepancy was detected.  Only built
 * when SCTP_AUDITING_ENABLED is defined.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Mark entry: 0xAA tagged with the caller-supplied location. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: no endpoint supplied, nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: no association supplied, nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the retran count as seen on entry. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount, from the sent queue itself, the chunks marked for
	 * retransmission and the booked bytes/chunks still in flight. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retran counter mismatch; correct it and log
		 * the new value under 0xA2. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total_flight disagrees with the queue sum. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: chunk-in-flight count disagrees. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	/* Cross-check: the per-destination flight sizes must sum to the
	 * association total. */
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight sizes are off; rebuild each
		 * destination's flight_size from the sent queue. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
704 
705 void
706 sctp_audit_log(uint8_t ev, uint8_t fd)
707 {
708 
709 	sctp_audit_data[sctp_audit_indx][0] = ev;
710 	sctp_audit_data[sctp_audit_indx][1] = fd;
711 	sctp_audit_indx++;
712 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
713 		sctp_audit_indx = 0;
714 	}
715 }
716 
717 #endif
718 
719 /*
720  * sctp_stop_timers_for_shutdown() should be called
721  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
722  * state to make sure that all timers are stopped.
723  */
724 void
725 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
726 {
727 	struct sctp_association *asoc;
728 	struct sctp_nets *net;
729 
730 	asoc = &stcb->asoc;
731 
732 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
733 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
734 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
735 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
736 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
737 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
738 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
739 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
740 	}
741 }
742 
743 /*
744  * a list of sizes based on typical mtu's, used only if next hop size not
745  * returned.
746  */
static uint32_t sctp_mtu_sizes[] = {
	68, 296, 508, 512, 544, 576,
	1006, 1492, 1500, 1536, 2002, 2048,
	4352, 4464, 8166, 17914, 32000, 65535
};

/*
 * Return the largest MTU smaller than val. If there is no
 * entry, just return val.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t idx, n;

	n = (uint32_t) (sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]));
	if (val <= sctp_mtu_sizes[0]) {
		/* Nothing in the table below val. */
		return (val);
	}
	/* Walk up until the first entry >= val; its predecessor is the
	 * largest table MTU strictly below val. */
	idx = 1;
	while ((idx < n) && (val > sctp_mtu_sizes[idx])) {
		idx++;
	}
	return (sctp_mtu_sizes[idx - 1]);
}

/*
 * Return the smallest MTU larger than val. If there is no
 * entry, just return val.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	uint32_t idx, n;

	n = (uint32_t) (sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]));
	for (idx = 0; idx < n; idx++) {
		if (sctp_mtu_sizes[idx] > val) {
			return (sctp_mtu_sizes[idx]);
		}
	}
	/* val is at or above the largest table entry. */
	return (val);
}
805 
/*
 * Refill the endpoint's random-number store by HMAC-ing the endpoint's
 * random seed with a monotonically increasing counter, then reset the
 * consumption offset to the start of the store.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
824 
/*
 * Return a 32-bit pseudo-random value for use as an initial TSN,
 * consuming 4 bytes from the endpoint's random store.  In debug mode
 * (initial_sequence_debug != 0) a simple incrementing sequence is
 * returned instead so traces are reproducible.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/* Claim a 4-byte slot in the random store via compare-and-set;
	 * losers of the race simply retry with the updated offset. */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/* NOTE(review): reads the byte array through a uint32_t pointer;
	 * assumes random_store is suitably aligned and the kernel build
	 * tolerates this aliasing — confirm before changing compiler
	 * flags. */
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
862 
863 uint32_t
864 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
865 {
866 	uint32_t x;
867 	struct timeval now;
868 
869 	if (check) {
870 		(void)SCTP_GETTIME_TIMEVAL(&now);
871 	}
872 	for (;;) {
873 		x = sctp_select_initial_TSN(&inp->sctp_ep);
874 		if (x == 0) {
875 			/* we never use 0 */
876 			continue;
877 		}
878 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
879 			break;
880 		}
881 	}
882 	return (x);
883 }
884 
885 int
886 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
887     uint32_t override_tag, uint32_t vrf_id)
888 {
889 	struct sctp_association *asoc;
890 
891 	/*
892 	 * Anything set to zero is taken care of by the allocation routine's
893 	 * bzero
894 	 */
895 
896 	/*
897 	 * Up front select what scoping to apply on addresses I tell my peer
898 	 * Not sure what to do with these right now, we will need to come up
899 	 * with a way to set them. We may need to pass them through from the
900 	 * caller in the sctp_aloc_assoc() function.
901 	 */
902 	int i;
903 
904 	asoc = &stcb->asoc;
905 	/* init all variables to a known value. */
906 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
907 	asoc->max_burst = m->sctp_ep.max_burst;
908 	asoc->fr_max_burst = m->sctp_ep.fr_max_burst;
909 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
910 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
911 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
912 	asoc->ecn_allowed = m->sctp_ecn_enable;
913 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
914 	asoc->sctp_cmt_pf = (uint8_t) 0;
915 	asoc->sctp_frag_point = m->sctp_frag_point;
916 	asoc->sctp_features = m->sctp_features;
917 	asoc->default_dscp = m->sctp_ep.default_dscp;
918 #ifdef INET6
919 	if (m->sctp_ep.default_flowlabel) {
920 		asoc->default_flowlabel = m->sctp_ep.default_flowlabel;
921 	} else {
922 		if (m->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
923 			asoc->default_flowlabel = sctp_select_initial_TSN(&m->sctp_ep);
924 			asoc->default_flowlabel &= 0x000fffff;
925 			asoc->default_flowlabel |= 0x80000000;
926 		} else {
927 			asoc->default_flowlabel = 0;
928 		}
929 	}
930 #endif
931 	asoc->sb_send_resv = 0;
932 	if (override_tag) {
933 		asoc->my_vtag = override_tag;
934 	} else {
935 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
936 	}
937 	/* Get the nonce tags */
938 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
939 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
940 	asoc->vrf_id = vrf_id;
941 
942 #ifdef SCTP_ASOCLOG_OF_TSNS
943 	asoc->tsn_in_at = 0;
944 	asoc->tsn_out_at = 0;
945 	asoc->tsn_in_wrapped = 0;
946 	asoc->tsn_out_wrapped = 0;
947 	asoc->cumack_log_at = 0;
948 	asoc->cumack_log_atsnt = 0;
949 #endif
950 #ifdef SCTP_FS_SPEC_LOG
951 	asoc->fs_index = 0;
952 #endif
953 	asoc->refcnt = 0;
954 	asoc->assoc_up_sent = 0;
955 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
956 	    sctp_select_initial_TSN(&m->sctp_ep);
957 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
958 	/* we are optimisitic here */
959 	asoc->peer_supports_pktdrop = 1;
960 	asoc->peer_supports_nat = 0;
961 	asoc->sent_queue_retran_cnt = 0;
962 
963 	/* for CMT */
964 	asoc->last_net_cmt_send_started = NULL;
965 
966 	/* This will need to be adjusted */
967 	asoc->last_acked_seq = asoc->init_seq_number - 1;
968 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
969 	asoc->asconf_seq_in = asoc->last_acked_seq;
970 
971 	/* here we are different, we hold the next one we expect */
972 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
973 
974 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
975 	asoc->initial_rto = m->sctp_ep.initial_rto;
976 
977 	asoc->max_init_times = m->sctp_ep.max_init_times;
978 	asoc->max_send_times = m->sctp_ep.max_send_times;
979 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
980 	asoc->def_net_pf_threshold = m->sctp_ep.def_net_pf_threshold;
981 	asoc->free_chunk_cnt = 0;
982 
983 	asoc->iam_blocking = 0;
984 
985 	asoc->context = m->sctp_context;
986 	asoc->def_send = m->def_send;
987 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
988 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
989 	asoc->pr_sctp_cnt = 0;
990 	asoc->total_output_queue_size = 0;
991 
992 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
993 		struct in6pcb *inp6;
994 
995 		/* Its a V6 socket */
996 		inp6 = (struct in6pcb *)m;
997 		asoc->ipv6_addr_legal = 1;
998 		/* Now look at the binding flag to see if V4 will be legal */
999 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1000 			asoc->ipv4_addr_legal = 1;
1001 		} else {
1002 			/* V4 addresses are NOT legal on the association */
1003 			asoc->ipv4_addr_legal = 0;
1004 		}
1005 	} else {
1006 		/* Its a V4 socket, no - V6 */
1007 		asoc->ipv4_addr_legal = 1;
1008 		asoc->ipv6_addr_legal = 0;
1009 	}
1010 
1011 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1012 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1013 
1014 	asoc->smallest_mtu = m->sctp_frag_point;
1015 	asoc->minrto = m->sctp_ep.sctp_minrto;
1016 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1017 
1018 	asoc->locked_on_sending = NULL;
1019 	asoc->stream_locked_on = 0;
1020 	asoc->ecn_echo_cnt_onq = 0;
1021 	asoc->stream_locked = 0;
1022 
1023 	asoc->send_sack = 1;
1024 
1025 	LIST_INIT(&asoc->sctp_restricted_addrs);
1026 
1027 	TAILQ_INIT(&asoc->nets);
1028 	TAILQ_INIT(&asoc->pending_reply_queue);
1029 	TAILQ_INIT(&asoc->asconf_ack_sent);
1030 	/* Setup to fill the hb random cache at first HB */
1031 	asoc->hb_random_idx = 4;
1032 
1033 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1034 
1035 	stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
1036 	stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
1037 
1038 	stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
1039 	stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
1040 
1041 	/*
1042 	 * Now the stream parameters, here we allocate space for all streams
1043 	 * that we request by default.
1044 	 */
1045 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1046 	    m->sctp_ep.pre_open_stream_count;
1047 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1048 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1049 	    SCTP_M_STRMO);
1050 	if (asoc->strmout == NULL) {
1051 		/* big trouble no memory */
1052 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1053 		return (ENOMEM);
1054 	}
1055 	for (i = 0; i < asoc->streamoutcnt; i++) {
1056 		/*
1057 		 * inbound side must be set to 0xffff, also NOTE when we get
1058 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1059 		 * count (streamoutcnt) but first check if we sent to any of
1060 		 * the upper streams that were dropped (if some were). Those
1061 		 * that were dropped must be notified to the upper layer as
1062 		 * failed to send.
1063 		 */
1064 		asoc->strmout[i].next_sequence_sent = 0x0;
1065 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1066 		asoc->strmout[i].stream_no = i;
1067 		asoc->strmout[i].last_msg_incomplete = 0;
1068 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1069 	}
1070 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1071 
1072 	/* Now the mapping array */
1073 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1074 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1075 	    SCTP_M_MAP);
1076 	if (asoc->mapping_array == NULL) {
1077 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1078 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1079 		return (ENOMEM);
1080 	}
1081 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1082 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1083 	    SCTP_M_MAP);
1084 	if (asoc->nr_mapping_array == NULL) {
1085 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1086 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1087 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1088 		return (ENOMEM);
1089 	}
1090 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1091 
1092 	/* Now the init of the other outqueues */
1093 	TAILQ_INIT(&asoc->free_chunks);
1094 	TAILQ_INIT(&asoc->control_send_queue);
1095 	TAILQ_INIT(&asoc->asconf_send_queue);
1096 	TAILQ_INIT(&asoc->send_queue);
1097 	TAILQ_INIT(&asoc->sent_queue);
1098 	TAILQ_INIT(&asoc->reasmqueue);
1099 	TAILQ_INIT(&asoc->resetHead);
1100 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1101 	TAILQ_INIT(&asoc->asconf_queue);
1102 	/* authentication fields */
1103 	asoc->authinfo.random = NULL;
1104 	asoc->authinfo.active_keyid = 0;
1105 	asoc->authinfo.assoc_key = NULL;
1106 	asoc->authinfo.assoc_keyid = 0;
1107 	asoc->authinfo.recv_key = NULL;
1108 	asoc->authinfo.recv_keyid = 0;
1109 	LIST_INIT(&asoc->shared_keys);
1110 	asoc->marked_retrans = 0;
1111 	asoc->port = m->sctp_ep.port;
1112 	asoc->timoinit = 0;
1113 	asoc->timodata = 0;
1114 	asoc->timosack = 0;
1115 	asoc->timoshutdown = 0;
1116 	asoc->timoheartbeat = 0;
1117 	asoc->timocookie = 0;
1118 	asoc->timoshutdownack = 0;
1119 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1120 	asoc->discontinuity_time = asoc->start_time;
1121 	/*
1122 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1123 	 * freed later when the association is freed.
1124 	 */
1125 	return (0);
1126 }
1127 
1128 void
1129 sctp_print_mapping_array(struct sctp_association *asoc)
1130 {
1131 	unsigned int i, limit;
1132 
1133 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1134 	    asoc->mapping_array_size,
1135 	    asoc->mapping_array_base_tsn,
1136 	    asoc->cumulative_tsn,
1137 	    asoc->highest_tsn_inside_map,
1138 	    asoc->highest_tsn_inside_nr_map);
1139 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1140 		if (asoc->mapping_array[limit - 1]) {
1141 			break;
1142 		}
1143 	}
1144 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1145 	for (i = 0; i < limit; i++) {
1146 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1147 	}
1148 	if (limit % 16)
1149 		printf("\n");
1150 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1151 		if (asoc->nr_mapping_array[limit - 1]) {
1152 			break;
1153 		}
1154 	}
1155 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1156 	for (i = 0; i < limit; i++) {
1157 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1158 	}
1159 	if (limit % 16)
1160 		printf("\n");
1161 }
1162 
1163 int
1164 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1165 {
1166 	/* mapping array needs to grow */
1167 	uint8_t *new_array1, *new_array2;
1168 	uint32_t new_size;
1169 
1170 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1171 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1172 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1173 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1174 		/* can't get more, forget it */
1175 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1176 		if (new_array1) {
1177 			SCTP_FREE(new_array1, SCTP_M_MAP);
1178 		}
1179 		if (new_array2) {
1180 			SCTP_FREE(new_array2, SCTP_M_MAP);
1181 		}
1182 		return (-1);
1183 	}
1184 	memset(new_array1, 0, new_size);
1185 	memset(new_array2, 0, new_size);
1186 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1187 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1188 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1189 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1190 	asoc->mapping_array = new_array1;
1191 	asoc->nr_mapping_array = new_array2;
1192 	asoc->mapping_array_size = new_size;
1193 	return (0);
1194 }
1195 
1196 
/*
 * Core of the asynchronous PCB/association iterator.  Walks every
 * endpoint (inp) whose sctp_flags/sctp_features match the iterator's
 * pcb_flags/pcb_features filters, and within each endpoint every
 * association whose state matches asoc_state, invoking the registered
 * callbacks: function_inp once per endpoint, function_assoc per
 * association, function_inp_end after the last association of an
 * endpoint, and function_atend when the whole walk is done.  The
 * iterator structure itself is freed here before returning.
 *
 * Locking: takes the INP-info read lock and the iterator lock on entry;
 * both are periodically dropped (after SCTP_ITERATOR_MAX_AT_ONCE
 * associations) to let other threads make progress, honoring any
 * stop-requests posted in sctp_it_ctl.iterator_flags while unlocked.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* drop the reference taken when the iterator was queued */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* it->inp was already read-locked above */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/* skip endpoints that do not match the flag/feature filters */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* endpoint callback asked to skip, or no associations */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* hold the stcb and inp alive while all locks are dropped */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				printf("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* re-acquire in the same order and drop the extra refs */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1344 
1345 void
1346 sctp_iterator_worker(void)
1347 {
1348 	struct sctp_iterator *it, *nit;
1349 
1350 	/* This function is called with the WQ lock in place */
1351 
1352 	sctp_it_ctl.iterator_running = 1;
1353 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1354 		sctp_it_ctl.cur_it = it;
1355 		/* now lets work on this one */
1356 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1357 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1358 		CURVNET_SET(it->vn);
1359 		sctp_iterator_work(it);
1360 		sctp_it_ctl.cur_it = NULL;
1361 		CURVNET_RESTORE();
1362 		SCTP_IPI_ITERATOR_WQ_LOCK();
1363 		/* sa_ignore FREED_MEMORY */
1364 	}
1365 	sctp_it_ctl.iterator_running = 0;
1366 	return;
1367 }
1368 
1369 
1370 static void
1371 sctp_handle_addr_wq(void)
1372 {
1373 	/* deal with the ADDR wq from the rtsock calls */
1374 	struct sctp_laddr *wi, *nwi;
1375 	struct sctp_asconf_iterator *asc;
1376 
1377 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1378 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1379 	if (asc == NULL) {
1380 		/* Try later, no memory */
1381 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1382 		    (struct sctp_inpcb *)NULL,
1383 		    (struct sctp_tcb *)NULL,
1384 		    (struct sctp_nets *)NULL);
1385 		return;
1386 	}
1387 	LIST_INIT(&asc->list_of_work);
1388 	asc->cnt = 0;
1389 
1390 	SCTP_WQ_ADDR_LOCK();
1391 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1392 		LIST_REMOVE(wi, sctp_nxt_addr);
1393 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1394 		asc->cnt++;
1395 	}
1396 	SCTP_WQ_ADDR_UNLOCK();
1397 
1398 	if (asc->cnt == 0) {
1399 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1400 	} else {
1401 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1402 		    sctp_asconf_iterator_stcb,
1403 		    NULL,	/* No ep end for boundall */
1404 		    SCTP_PCB_FLAGS_BOUNDALL,
1405 		    SCTP_PCB_ANY_FEATURES,
1406 		    SCTP_ASOC_ANY_STATE,
1407 		    (void *)asc, 0,
1408 		    sctp_asconf_iterator_end, NULL, 0);
1409 	}
1410 }
1411 
/*
 * NOTE(review): file-scope scratch variables written only by the
 * SCTP_TIMER_TYPE_SEND branch of sctp_timeout_handler() below, with no
 * synchronization.  They look like debugging leftovers and presumably
 * should be function locals (or at least static) — TODO confirm nothing
 * outside this file references them before narrowing their linkage.
 */
int retcode = 0;
int cur_oerr = 0;
1414 
/*
 * Callout dispatcher for every SCTP timer type.  't' is really a
 * struct sctp_timer embedded in an endpoint (inp), association (stcb)
 * or destination (net).  After validating the timer (self pointer,
 * type, endpoint/association still usable, callout still armed) and
 * taking the needed references and locks, the per-type handler code is
 * run from the big switch below.
 *
 * Cleanup paths: 'get_out' unlocks the stcb, 'out_decr' additionally
 * drops the inp reference, and 'out_no_decr' skips both — used by the
 * ASOCKILL/INPKILL cases which free the stcb/inp themselves (and NULL
 * the local pointers first so freed memory is never touched).
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* only ADDR_WQ timers are allowed to fire without an endpoint */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/*
	 * Cache the type now: the ASOCKILL/INPKILL cases below free the
	 * structure the timer lives in, so the final debug log must not
	 * read tmr->type.
	 */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * If the socket is gone, only the timer types that must
		 * still run to tear the endpoint down are serviced.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* hold the association while we examine its state */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* callout was stopped while we were getting here */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* lock the association and re-check its state under the lock */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed-SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* only re-arm and send if HB is still enabled on this path */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* rotate the endpoint's cookie secret keys */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* abort frees the stcb, hence the jump past the unlock path */
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1857 
1858 void
1859 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1860     struct sctp_nets *net)
1861 {
1862 	uint32_t to_ticks;
1863 	struct sctp_timer *tmr;
1864 
1865 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1866 		return;
1867 
1868 	to_ticks = 0;
1869 
1870 	tmr = NULL;
1871 	if (stcb) {
1872 		SCTP_TCB_LOCK_ASSERT(stcb);
1873 	}
1874 	switch (t_type) {
1875 	case SCTP_TIMER_TYPE_ZERO_COPY:
1876 		tmr = &inp->sctp_ep.zero_copy_timer;
1877 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1878 		break;
1879 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1880 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1881 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1882 		break;
1883 	case SCTP_TIMER_TYPE_ADDR_WQ:
1884 		/* Only 1 tick away :-) */
1885 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1886 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1887 		break;
1888 	case SCTP_TIMER_TYPE_SEND:
1889 		/* Here we use the RTO timer */
1890 		{
1891 			int rto_val;
1892 
1893 			if ((stcb == NULL) || (net == NULL)) {
1894 				return;
1895 			}
1896 			tmr = &net->rxt_timer;
1897 			if (net->RTO == 0) {
1898 				rto_val = stcb->asoc.initial_rto;
1899 			} else {
1900 				rto_val = net->RTO;
1901 			}
1902 			to_ticks = MSEC_TO_TICKS(rto_val);
1903 		}
1904 		break;
1905 	case SCTP_TIMER_TYPE_INIT:
1906 		/*
1907 		 * Here we use the INIT timer default usually about 1
1908 		 * minute.
1909 		 */
1910 		if ((stcb == NULL) || (net == NULL)) {
1911 			return;
1912 		}
1913 		tmr = &net->rxt_timer;
1914 		if (net->RTO == 0) {
1915 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1916 		} else {
1917 			to_ticks = MSEC_TO_TICKS(net->RTO);
1918 		}
1919 		break;
1920 	case SCTP_TIMER_TYPE_RECV:
1921 		/*
1922 		 * Here we use the Delayed-Ack timer value from the inp
1923 		 * ususually about 200ms.
1924 		 */
1925 		if (stcb == NULL) {
1926 			return;
1927 		}
1928 		tmr = &stcb->asoc.dack_timer;
1929 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1930 		break;
1931 	case SCTP_TIMER_TYPE_SHUTDOWN:
1932 		/* Here we use the RTO of the destination. */
1933 		if ((stcb == NULL) || (net == NULL)) {
1934 			return;
1935 		}
1936 		if (net->RTO == 0) {
1937 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1938 		} else {
1939 			to_ticks = MSEC_TO_TICKS(net->RTO);
1940 		}
1941 		tmr = &net->rxt_timer;
1942 		break;
1943 	case SCTP_TIMER_TYPE_HEARTBEAT:
1944 		/*
1945 		 * the net is used here so that we can add in the RTO. Even
1946 		 * though we use a different timer. We also add the HB timer
1947 		 * PLUS a random jitter.
1948 		 */
1949 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1950 			return;
1951 		} else {
1952 			uint32_t rndval;
1953 			uint32_t jitter;
1954 
1955 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1956 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1957 				return;
1958 			}
1959 			if (net->RTO == 0) {
1960 				to_ticks = stcb->asoc.initial_rto;
1961 			} else {
1962 				to_ticks = net->RTO;
1963 			}
1964 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1965 			jitter = rndval % to_ticks;
1966 			if (jitter >= (to_ticks >> 1)) {
1967 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1968 			} else {
1969 				to_ticks = to_ticks - jitter;
1970 			}
1971 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1972 			    !(net->dest_state & SCTP_ADDR_PF)) {
1973 				to_ticks += net->heart_beat_delay;
1974 			}
1975 			/*
1976 			 * Now we must convert the to_ticks that are now in
1977 			 * ms to ticks.
1978 			 */
1979 			to_ticks = MSEC_TO_TICKS(to_ticks);
1980 			tmr = &net->hb_timer;
1981 		}
1982 		break;
1983 	case SCTP_TIMER_TYPE_COOKIE:
1984 		/*
1985 		 * Here we can use the RTO timer from the network since one
1986 		 * RTT was compelete. If a retran happened then we will be
1987 		 * using the RTO initial value.
1988 		 */
1989 		if ((stcb == NULL) || (net == NULL)) {
1990 			return;
1991 		}
1992 		if (net->RTO == 0) {
1993 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1994 		} else {
1995 			to_ticks = MSEC_TO_TICKS(net->RTO);
1996 		}
1997 		tmr = &net->rxt_timer;
1998 		break;
1999 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2000 		/*
2001 		 * nothing needed but the endpoint here ususually about 60
2002 		 * minutes.
2003 		 */
2004 		if (inp == NULL) {
2005 			return;
2006 		}
2007 		tmr = &inp->sctp_ep.signature_change;
2008 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2009 		break;
2010 	case SCTP_TIMER_TYPE_ASOCKILL:
2011 		if (stcb == NULL) {
2012 			return;
2013 		}
2014 		tmr = &stcb->asoc.strreset_timer;
2015 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2016 		break;
2017 	case SCTP_TIMER_TYPE_INPKILL:
2018 		/*
2019 		 * The inp is setup to die. We re-use the signature_chage
2020 		 * timer since that has stopped and we are in the GONE
2021 		 * state.
2022 		 */
2023 		if (inp == NULL) {
2024 			return;
2025 		}
2026 		tmr = &inp->sctp_ep.signature_change;
2027 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2028 		break;
2029 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2030 		/*
2031 		 * Here we use the value found in the EP for PMTU ususually
2032 		 * about 10 minutes.
2033 		 */
2034 		if ((stcb == NULL) || (inp == NULL)) {
2035 			return;
2036 		}
2037 		if (net == NULL) {
2038 			return;
2039 		}
2040 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2041 			return;
2042 		}
2043 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2044 		tmr = &net->pmtu_timer;
2045 		break;
2046 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2047 		/* Here we use the RTO of the destination */
2048 		if ((stcb == NULL) || (net == NULL)) {
2049 			return;
2050 		}
2051 		if (net->RTO == 0) {
2052 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2053 		} else {
2054 			to_ticks = MSEC_TO_TICKS(net->RTO);
2055 		}
2056 		tmr = &net->rxt_timer;
2057 		break;
2058 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2059 		/*
2060 		 * Here we use the endpoints shutdown guard timer usually
2061 		 * about 3 minutes.
2062 		 */
2063 		if ((inp == NULL) || (stcb == NULL)) {
2064 			return;
2065 		}
2066 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2067 		tmr = &stcb->asoc.shut_guard_timer;
2068 		break;
2069 	case SCTP_TIMER_TYPE_STRRESET:
2070 		/*
2071 		 * Here the timer comes from the stcb but its value is from
2072 		 * the net's RTO.
2073 		 */
2074 		if ((stcb == NULL) || (net == NULL)) {
2075 			return;
2076 		}
2077 		if (net->RTO == 0) {
2078 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2079 		} else {
2080 			to_ticks = MSEC_TO_TICKS(net->RTO);
2081 		}
2082 		tmr = &stcb->asoc.strreset_timer;
2083 		break;
2084 	case SCTP_TIMER_TYPE_ASCONF:
2085 		/*
2086 		 * Here the timer comes from the stcb but its value is from
2087 		 * the net's RTO.
2088 		 */
2089 		if ((stcb == NULL) || (net == NULL)) {
2090 			return;
2091 		}
2092 		if (net->RTO == 0) {
2093 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2094 		} else {
2095 			to_ticks = MSEC_TO_TICKS(net->RTO);
2096 		}
2097 		tmr = &stcb->asoc.asconf_timer;
2098 		break;
2099 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2100 		if ((stcb == NULL) || (net != NULL)) {
2101 			return;
2102 		}
2103 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2104 		tmr = &stcb->asoc.delete_prim_timer;
2105 		break;
2106 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2107 		if (stcb == NULL) {
2108 			return;
2109 		}
2110 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2111 			/*
2112 			 * Really an error since stcb is NOT set to
2113 			 * autoclose
2114 			 */
2115 			return;
2116 		}
2117 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2118 		tmr = &stcb->asoc.autoclose_timer;
2119 		break;
2120 	default:
2121 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2122 		    __FUNCTION__, t_type);
2123 		return;
2124 		break;
2125 	};
2126 	if ((to_ticks <= 0) || (tmr == NULL)) {
2127 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2128 		    __FUNCTION__, t_type, to_ticks, tmr);
2129 		return;
2130 	}
2131 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2132 		/*
2133 		 * we do NOT allow you to have it already running. if it is
2134 		 * we leave the current one up unchanged
2135 		 */
2136 		return;
2137 	}
2138 	/* At this point we can proceed */
2139 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2140 		stcb->asoc.num_send_timers_up++;
2141 	}
2142 	tmr->stopped_from = 0;
2143 	tmr->type = t_type;
2144 	tmr->ep = (void *)inp;
2145 	tmr->tcb = (void *)stcb;
2146 	tmr->net = (void *)net;
2147 	tmr->self = (void *)tmr;
2148 	tmr->vnet = (void *)curvnet;
2149 	tmr->ticks = sctp_get_tick_count();
2150 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2151 	return;
2152 }
2153 
2154 void
2155 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2156     struct sctp_nets *net, uint32_t from)
2157 {
2158 	struct sctp_timer *tmr;
2159 
2160 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2161 	    (inp == NULL))
2162 		return;
2163 
2164 	tmr = NULL;
2165 	if (stcb) {
2166 		SCTP_TCB_LOCK_ASSERT(stcb);
2167 	}
2168 	switch (t_type) {
2169 	case SCTP_TIMER_TYPE_ZERO_COPY:
2170 		tmr = &inp->sctp_ep.zero_copy_timer;
2171 		break;
2172 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2173 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2174 		break;
2175 	case SCTP_TIMER_TYPE_ADDR_WQ:
2176 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2177 		break;
2178 	case SCTP_TIMER_TYPE_SEND:
2179 		if ((stcb == NULL) || (net == NULL)) {
2180 			return;
2181 		}
2182 		tmr = &net->rxt_timer;
2183 		break;
2184 	case SCTP_TIMER_TYPE_INIT:
2185 		if ((stcb == NULL) || (net == NULL)) {
2186 			return;
2187 		}
2188 		tmr = &net->rxt_timer;
2189 		break;
2190 	case SCTP_TIMER_TYPE_RECV:
2191 		if (stcb == NULL) {
2192 			return;
2193 		}
2194 		tmr = &stcb->asoc.dack_timer;
2195 		break;
2196 	case SCTP_TIMER_TYPE_SHUTDOWN:
2197 		if ((stcb == NULL) || (net == NULL)) {
2198 			return;
2199 		}
2200 		tmr = &net->rxt_timer;
2201 		break;
2202 	case SCTP_TIMER_TYPE_HEARTBEAT:
2203 		if ((stcb == NULL) || (net == NULL)) {
2204 			return;
2205 		}
2206 		tmr = &net->hb_timer;
2207 		break;
2208 	case SCTP_TIMER_TYPE_COOKIE:
2209 		if ((stcb == NULL) || (net == NULL)) {
2210 			return;
2211 		}
2212 		tmr = &net->rxt_timer;
2213 		break;
2214 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2215 		/* nothing needed but the endpoint here */
2216 		tmr = &inp->sctp_ep.signature_change;
2217 		/*
2218 		 * We re-use the newcookie timer for the INP kill timer. We
2219 		 * must assure that we do not kill it by accident.
2220 		 */
2221 		break;
2222 	case SCTP_TIMER_TYPE_ASOCKILL:
2223 		/*
2224 		 * Stop the asoc kill timer.
2225 		 */
2226 		if (stcb == NULL) {
2227 			return;
2228 		}
2229 		tmr = &stcb->asoc.strreset_timer;
2230 		break;
2231 
2232 	case SCTP_TIMER_TYPE_INPKILL:
2233 		/*
2234 		 * The inp is setup to die. We re-use the signature_chage
2235 		 * timer since that has stopped and we are in the GONE
2236 		 * state.
2237 		 */
2238 		tmr = &inp->sctp_ep.signature_change;
2239 		break;
2240 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2241 		if ((stcb == NULL) || (net == NULL)) {
2242 			return;
2243 		}
2244 		tmr = &net->pmtu_timer;
2245 		break;
2246 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2247 		if ((stcb == NULL) || (net == NULL)) {
2248 			return;
2249 		}
2250 		tmr = &net->rxt_timer;
2251 		break;
2252 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2253 		if (stcb == NULL) {
2254 			return;
2255 		}
2256 		tmr = &stcb->asoc.shut_guard_timer;
2257 		break;
2258 	case SCTP_TIMER_TYPE_STRRESET:
2259 		if (stcb == NULL) {
2260 			return;
2261 		}
2262 		tmr = &stcb->asoc.strreset_timer;
2263 		break;
2264 	case SCTP_TIMER_TYPE_ASCONF:
2265 		if (stcb == NULL) {
2266 			return;
2267 		}
2268 		tmr = &stcb->asoc.asconf_timer;
2269 		break;
2270 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2271 		if (stcb == NULL) {
2272 			return;
2273 		}
2274 		tmr = &stcb->asoc.delete_prim_timer;
2275 		break;
2276 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2277 		if (stcb == NULL) {
2278 			return;
2279 		}
2280 		tmr = &stcb->asoc.autoclose_timer;
2281 		break;
2282 	default:
2283 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2284 		    __FUNCTION__, t_type);
2285 		break;
2286 	};
2287 	if (tmr == NULL) {
2288 		return;
2289 	}
2290 	if ((tmr->type != t_type) && tmr->type) {
2291 		/*
2292 		 * Ok we have a timer that is under joint use. Cookie timer
2293 		 * per chance with the SEND timer. We therefore are NOT
2294 		 * running the timer that the caller wants stopped.  So just
2295 		 * return.
2296 		 */
2297 		return;
2298 	}
2299 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2300 		stcb->asoc.num_send_timers_up--;
2301 		if (stcb->asoc.num_send_timers_up < 0) {
2302 			stcb->asoc.num_send_timers_up = 0;
2303 		}
2304 	}
2305 	tmr->self = NULL;
2306 	tmr->stopped_from = from;
2307 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2308 	return;
2309 }
2310 
2311 uint32_t
2312 sctp_calculate_len(struct mbuf *m)
2313 {
2314 	uint32_t tlen = 0;
2315 	struct mbuf *at;
2316 
2317 	at = m;
2318 	while (at) {
2319 		tlen += SCTP_BUF_LEN(at);
2320 		at = SCTP_BUF_NEXT(at);
2321 	}
2322 	return (tlen);
2323 }
2324 
2325 void
2326 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2327     struct sctp_association *asoc, uint32_t mtu)
2328 {
2329 	/*
2330 	 * Reset the P-MTU size on this association, this involves changing
2331 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2332 	 * allow the DF flag to be cleared.
2333 	 */
2334 	struct sctp_tmit_chunk *chk;
2335 	unsigned int eff_mtu, ovh;
2336 
2337 	asoc->smallest_mtu = mtu;
2338 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2339 		ovh = SCTP_MIN_OVERHEAD;
2340 	} else {
2341 		ovh = SCTP_MIN_V4_OVERHEAD;
2342 	}
2343 	eff_mtu = mtu - ovh;
2344 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2345 		if (chk->send_size > eff_mtu) {
2346 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2347 		}
2348 	}
2349 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2350 		if (chk->send_size > eff_mtu) {
2351 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2352 		}
2353 	}
2354 }
2355 
2356 
2357 /*
2358  * given an association and starting time of the current RTT period return
2359  * RTO in number of msecs net should point to the current network
2360  */
2361 
2362 uint32_t
2363 sctp_calculate_rto(struct sctp_tcb *stcb,
2364     struct sctp_association *asoc,
2365     struct sctp_nets *net,
2366     struct timeval *told,
2367     int safe, int rtt_from_sack)
2368 {
2369 	/*-
2370 	 * given an association and the starting time of the current RTT
2371 	 * period (in value1/value2) return RTO in number of msecs.
2372 	 */
2373 	int32_t rtt;		/* RTT in ms */
2374 	uint32_t new_rto;
2375 	int first_measure = 0;
2376 	struct timeval now, then, *old;
2377 
2378 	/* Copy it out for sparc64 */
2379 	if (safe == sctp_align_unsafe_makecopy) {
2380 		old = &then;
2381 		memcpy(&then, told, sizeof(struct timeval));
2382 	} else if (safe == sctp_align_safe_nocopy) {
2383 		old = told;
2384 	} else {
2385 		/* error */
2386 		SCTP_PRINTF("Huh, bad rto calc call\n");
2387 		return (0);
2388 	}
2389 	/************************/
2390 	/* 1. calculate new RTT */
2391 	/************************/
2392 	/* get the current time */
2393 	if (stcb->asoc.use_precise_time) {
2394 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2395 	} else {
2396 		(void)SCTP_GETTIME_TIMEVAL(&now);
2397 	}
2398 	timevalsub(&now, old);
2399 	/* store the current RTT in us */
2400 	net->rtt = (uint64_t) 10000000 *(uint64_t) now.tv_sec +
2401 	         (uint64_t) now.tv_usec;
2402 
2403 	/* computer rtt in ms */
2404 	rtt = net->rtt / 1000;
2405 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2406 		/*
2407 		 * Tell the CC module that a new update has just occurred
2408 		 * from a sack
2409 		 */
2410 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2411 	}
2412 	/*
2413 	 * Do we need to determine the lan? We do this only on sacks i.e.
2414 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2415 	 */
2416 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2417 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2418 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2419 			net->lan_type = SCTP_LAN_INTERNET;
2420 		} else {
2421 			net->lan_type = SCTP_LAN_LOCAL;
2422 		}
2423 	}
2424 	/***************************/
2425 	/* 2. update RTTVAR & SRTT */
2426 	/***************************/
2427 	/*-
2428 	 * Compute the scaled average lastsa and the
2429 	 * scaled variance lastsv as described in van Jacobson
2430 	 * Paper "Congestion Avoidance and Control", Annex A.
2431 	 *
2432 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2433 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2434 	 */
2435 	if (net->RTO_measured) {
2436 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2437 		net->lastsa += rtt;
2438 		if (rtt < 0) {
2439 			rtt = -rtt;
2440 		}
2441 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2442 		net->lastsv += rtt;
2443 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2444 			rto_logging(net, SCTP_LOG_RTTVAR);
2445 		}
2446 	} else {
2447 		/* First RTO measurment */
2448 		net->RTO_measured = 1;
2449 		first_measure = 1;
2450 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2451 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2452 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2453 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2454 		}
2455 	}
2456 	if (net->lastsv == 0) {
2457 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2458 	}
2459 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2460 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2461 	    (stcb->asoc.sat_network_lockout == 0)) {
2462 		stcb->asoc.sat_network = 1;
2463 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2464 		stcb->asoc.sat_network = 0;
2465 		stcb->asoc.sat_network_lockout = 1;
2466 	}
2467 	/* bound it, per C6/C7 in Section 5.3.1 */
2468 	if (new_rto < stcb->asoc.minrto) {
2469 		new_rto = stcb->asoc.minrto;
2470 	}
2471 	if (new_rto > stcb->asoc.maxrto) {
2472 		new_rto = stcb->asoc.maxrto;
2473 	}
2474 	/* we are now returning the RTO */
2475 	return (new_rto);
2476 }
2477 
2478 /*
2479  * return a pointer to a contiguous piece of data from the given mbuf chain
2480  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2481  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2482  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2483  */
2484 caddr_t
2485 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2486 {
2487 	uint32_t count;
2488 	uint8_t *ptr;
2489 
2490 	ptr = in_ptr;
2491 	if ((off < 0) || (len <= 0))
2492 		return (NULL);
2493 
2494 	/* find the desired start location */
2495 	while ((m != NULL) && (off > 0)) {
2496 		if (off < SCTP_BUF_LEN(m))
2497 			break;
2498 		off -= SCTP_BUF_LEN(m);
2499 		m = SCTP_BUF_NEXT(m);
2500 	}
2501 	if (m == NULL)
2502 		return (NULL);
2503 
2504 	/* is the current mbuf large enough (eg. contiguous)? */
2505 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2506 		return (mtod(m, caddr_t)+off);
2507 	} else {
2508 		/* else, it spans more than one mbuf, so save a temp copy... */
2509 		while ((m != NULL) && (len > 0)) {
2510 			count = min(SCTP_BUF_LEN(m) - off, len);
2511 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2512 			len -= count;
2513 			ptr += count;
2514 			off = 0;
2515 			m = SCTP_BUF_NEXT(m);
2516 		}
2517 		if ((m == NULL) && (len > 0))
2518 			return (NULL);
2519 		else
2520 			return ((caddr_t)in_ptr);
2521 	}
2522 }
2523 
2524 
2525 
2526 struct sctp_paramhdr *
2527 sctp_get_next_param(struct mbuf *m,
2528     int offset,
2529     struct sctp_paramhdr *pull,
2530     int pull_limit)
2531 {
2532 	/* This just provides a typed signature to Peter's Pull routine */
2533 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2534 	    (uint8_t *) pull));
2535 }
2536 
2537 
2538 int
2539 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2540 {
2541 	/*
2542 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2543 	 * padlen is > 3 this routine will fail.
2544 	 */
2545 	uint8_t *dp;
2546 	int i;
2547 
2548 	if (padlen > 3) {
2549 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2550 		return (ENOBUFS);
2551 	}
2552 	if (padlen <= M_TRAILINGSPACE(m)) {
2553 		/*
2554 		 * The easy way. We hope the majority of the time we hit
2555 		 * here :)
2556 		 */
2557 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2558 		SCTP_BUF_LEN(m) += padlen;
2559 	} else {
2560 		/* Hard way we must grow the mbuf */
2561 		struct mbuf *tmp;
2562 
2563 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2564 		if (tmp == NULL) {
2565 			/* Out of space GAK! we are in big trouble. */
2566 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2567 			return (ENOSPC);
2568 		}
2569 		/* setup and insert in middle */
2570 		SCTP_BUF_LEN(tmp) = padlen;
2571 		SCTP_BUF_NEXT(tmp) = NULL;
2572 		SCTP_BUF_NEXT(m) = tmp;
2573 		dp = mtod(tmp, uint8_t *);
2574 	}
2575 	/* zero out the pad */
2576 	for (i = 0; i < padlen; i++) {
2577 		*dp = 0;
2578 		dp++;
2579 	}
2580 	return (0);
2581 }
2582 
2583 int
2584 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2585 {
2586 	/* find the last mbuf in chain and pad it */
2587 	struct mbuf *m_at;
2588 
2589 	m_at = m;
2590 	if (last_mbuf) {
2591 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2592 	} else {
2593 		while (m_at) {
2594 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2595 				return (sctp_add_pad_tombuf(m_at, padval));
2596 			}
2597 			m_at = SCTP_BUF_NEXT(m_at);
2598 		}
2599 	}
2600 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2601 	return (EFAULT);
2602 }
2603 
/*
 * Queue an SCTP_ASSOC_CHANGE notification (event is e.g. SCTP_COMM_LOST
 * or SCTP_CANT_STR_ASSOC) on the socket's read queue.  For one-to-one
 * style and connected one-to-many sockets, COMM_LOST/CANT_STR_ASSOC
 * additionally set so_error and wake any sleepers so blocked callers
 * see the failure.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		/* Failed before the handshake completed -> "refused". */
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: take a refcount so the stcb cannot go
		 * away, drop the TCB lock, take the socket lock, then
		 * re-take the TCB lock.  If the socket closed while we
		 * were unlocked, bail out.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		socantrcvmore(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Fill in the notification structure. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* Same lock-order dance as above; see comment there. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2721 
2722 static void
2723 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2724     struct sockaddr *sa, uint32_t error)
2725 {
2726 	struct mbuf *m_notify;
2727 	struct sctp_paddr_change *spc;
2728 	struct sctp_queued_to_read *control;
2729 
2730 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2731 		/* event not enabled */
2732 		return;
2733 	}
2734 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2735 	if (m_notify == NULL)
2736 		return;
2737 	SCTP_BUF_LEN(m_notify) = 0;
2738 	spc = mtod(m_notify, struct sctp_paddr_change *);
2739 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2740 	spc->spc_flags = 0;
2741 	spc->spc_length = sizeof(struct sctp_paddr_change);
2742 	switch (sa->sa_family) {
2743 #ifdef INET
2744 	case AF_INET:
2745 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2746 		break;
2747 #endif
2748 #ifdef INET6
2749 	case AF_INET6:
2750 		{
2751 			struct sockaddr_in6 *sin6;
2752 
2753 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2754 
2755 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2756 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2757 				if (sin6->sin6_scope_id == 0) {
2758 					/* recover scope_id for user */
2759 					(void)sa6_recoverscope(sin6);
2760 				} else {
2761 					/* clear embedded scope_id for user */
2762 					in6_clearscope(&sin6->sin6_addr);
2763 				}
2764 			}
2765 			break;
2766 		}
2767 #endif
2768 	default:
2769 		/* TSNH */
2770 		break;
2771 	}
2772 	spc->spc_state = state;
2773 	spc->spc_error = error;
2774 	spc->spc_assoc_id = sctp_get_associd(stcb);
2775 
2776 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2777 	SCTP_BUF_NEXT(m_notify) = NULL;
2778 
2779 	/* append to socket */
2780 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2781 	    0, 0, stcb->asoc.context, 0, 0, 0,
2782 	    m_notify);
2783 	if (control == NULL) {
2784 		/* no memory */
2785 		sctp_m_freem(m_notify);
2786 		return;
2787 	}
2788 	control->length = SCTP_BUF_LEN(m_notify);
2789 	control->spec_flags = M_NOTIFICATION;
2790 	/* not that we need this */
2791 	control->tail_mbuf = m_notify;
2792 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2793 	    control,
2794 	    &stcb->sctp_socket->so_rcv, 1,
2795 	    SCTP_READ_LOCK_NOT_HELD,
2796 	    SCTP_SO_NOT_LOCKED);
2797 }
2798 
2799 
/*
 * Queue an SCTP_SEND_FAILED notification for an already-chunked message
 * (chk).  The chunk's data mbufs are stolen (chk->data is set NULL) and
 * chained behind the notification after the SCTP data-chunk header is
 * trimmed off.  'error' selects SCTP_DATA_UNSENT vs SCTP_DATA_SENT.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * Reported length is the notification header plus the user data,
	 * i.e. chunk size without the SCTP data-chunk header.
	 */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	/*
	 * NOTE(review): unlike the other notify routines, control->length
	 * is not set here — presumably sctp_add_to_readq accounts for the
	 * chained data; confirm before relying on it.
	 */
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2881 
2882 
/*
 * Queue an SCTP_SEND_FAILED notification for a message still pending on
 * a stream queue (sp).  The pending data mbufs are stolen (sp->data is
 * set NULL) and chained behind the notification.  'error' selects
 * SCTP_DATA_UNSENT vs SCTP_DATA_SENT.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* Reported length: notification header plus the queued user data. */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/*
		 * some_taken set: presumably earlier fragments were
		 * already taken for sending — mark as last fragment.
		 */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
2955 
2956 
2957 
2958 static void
2959 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
2960 {
2961 	struct mbuf *m_notify;
2962 	struct sctp_adaptation_event *sai;
2963 	struct sctp_queued_to_read *control;
2964 
2965 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
2966 		/* event not enabled */
2967 		return;
2968 	}
2969 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
2970 	if (m_notify == NULL)
2971 		/* no space left */
2972 		return;
2973 	SCTP_BUF_LEN(m_notify) = 0;
2974 	sai = mtod(m_notify, struct sctp_adaptation_event *);
2975 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
2976 	sai->sai_flags = 0;
2977 	sai->sai_length = sizeof(struct sctp_adaptation_event);
2978 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
2979 	sai->sai_assoc_id = sctp_get_associd(stcb);
2980 
2981 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
2982 	SCTP_BUF_NEXT(m_notify) = NULL;
2983 
2984 	/* append to socket */
2985 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2986 	    0, 0, stcb->asoc.context, 0, 0, 0,
2987 	    m_notify);
2988 	if (control == NULL) {
2989 		/* no memory */
2990 		sctp_m_freem(m_notify);
2991 		return;
2992 	}
2993 	control->length = SCTP_BUF_LEN(m_notify);
2994 	control->spec_flags = M_NOTIFICATION;
2995 	/* not that we need this */
2996 	control->tail_mbuf = m_notify;
2997 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2998 	    control,
2999 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3000 }
3001 
/*
 * This always must be called with the read-queue LOCKED in the INP.
 *
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification.  Unlike the other
 * notify routines this does not use sctp_add_to_readq(); the entry is
 * spliced into the inp's read queue by hand so it lands directly behind
 * the partially delivered message (asoc.control_pdapi).  'val' packs the
 * stream number in the upper 16 bits and the stream sequence number in
 * the lower 16 bits.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* receive side is closed; nobody could read the event */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	/*
	 * length is zeroed here and re-accounted atomically below; the
	 * assignment above is effectively overwritten.
	 */
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* charge the notification against socket-buffer accounting */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock-order dance: hold a ref, drop the TCB lock
			 * to take the socket lock, re-take the TCB lock,
			 * then re-check the socket did not go away.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		/* wake any reader blocked on the socket */
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3095 
/*
 * Notify the application that the peer has sent a SHUTDOWN.  For 1-to-1
 * style (and connected UDP-model) sockets, further sends are disallowed
 * on the socket; additionally an SCTP_SHUTDOWN_EVENT is queued if the
 * application subscribed to it.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Lock-order dance: hold a ref, drop the TCB lock to take
		 * the socket lock, re-take the TCB lock, then make sure
		 * the socket was not closed underneath us.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		/* disallow further sends on the socket */
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3163 
3164 static void
3165 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3166     int so_locked
3167 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3168     SCTP_UNUSED
3169 #endif
3170 )
3171 {
3172 	struct mbuf *m_notify;
3173 	struct sctp_sender_dry_event *event;
3174 	struct sctp_queued_to_read *control;
3175 
3176 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3177 		/* event not enabled */
3178 		return;
3179 	}
3180 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3181 	if (m_notify == NULL) {
3182 		/* no space left */
3183 		return;
3184 	}
3185 	SCTP_BUF_LEN(m_notify) = 0;
3186 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3187 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3188 	event->sender_dry_flags = 0;
3189 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3190 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3191 
3192 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3193 	SCTP_BUF_NEXT(m_notify) = NULL;
3194 
3195 	/* append to socket */
3196 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3197 	    0, 0, stcb->asoc.context, 0, 0, 0,
3198 	    m_notify);
3199 	if (control == NULL) {
3200 		/* no memory */
3201 		sctp_m_freem(m_notify);
3202 		return;
3203 	}
3204 	control->length = SCTP_BUF_LEN(m_notify);
3205 	control->spec_flags = M_NOTIFICATION;
3206 	/* not that we need this */
3207 	control->tail_mbuf = m_notify;
3208 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3209 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3210 }
3211 
3212 
3213 static void
3214 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3215 {
3216 	struct mbuf *m_notify;
3217 	struct sctp_queued_to_read *control;
3218 	struct sctp_stream_reset_event *strreset;
3219 	int len;
3220 
3221 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3222 		/* event not enabled */
3223 		return;
3224 	}
3225 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3226 	if (m_notify == NULL)
3227 		/* no space left */
3228 		return;
3229 	SCTP_BUF_LEN(m_notify) = 0;
3230 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3231 	if (len > M_TRAILINGSPACE(m_notify)) {
3232 		/* never enough room */
3233 		sctp_m_freem(m_notify);
3234 		return;
3235 	}
3236 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3237 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3238 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3239 	strreset->strreset_length = len;
3240 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3241 	strreset->strreset_list[0] = number_entries;
3242 
3243 	SCTP_BUF_LEN(m_notify) = len;
3244 	SCTP_BUF_NEXT(m_notify) = NULL;
3245 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3246 		/* no space */
3247 		sctp_m_freem(m_notify);
3248 		return;
3249 	}
3250 	/* append to socket */
3251 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3252 	    0, 0, stcb->asoc.context, 0, 0, 0,
3253 	    m_notify);
3254 	if (control == NULL) {
3255 		/* no memory */
3256 		sctp_m_freem(m_notify);
3257 		return;
3258 	}
3259 	control->spec_flags = M_NOTIFICATION;
3260 	control->length = SCTP_BUF_LEN(m_notify);
3261 	/* not that we need this */
3262 	control->tail_mbuf = m_notify;
3263 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3264 	    control,
3265 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3266 }
3267 
3268 
3269 static void
3270 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3271     int number_entries, uint16_t * list, int flag)
3272 {
3273 	struct mbuf *m_notify;
3274 	struct sctp_queued_to_read *control;
3275 	struct sctp_stream_reset_event *strreset;
3276 	int len;
3277 
3278 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3279 		/* event not enabled */
3280 		return;
3281 	}
3282 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3283 	if (m_notify == NULL)
3284 		/* no space left */
3285 		return;
3286 	SCTP_BUF_LEN(m_notify) = 0;
3287 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3288 	if (len > M_TRAILINGSPACE(m_notify)) {
3289 		/* never enough room */
3290 		sctp_m_freem(m_notify);
3291 		return;
3292 	}
3293 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3294 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3295 	if (number_entries == 0) {
3296 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3297 	} else {
3298 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3299 	}
3300 	strreset->strreset_length = len;
3301 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3302 	if (number_entries) {
3303 		int i;
3304 
3305 		for (i = 0; i < number_entries; i++) {
3306 			strreset->strreset_list[i] = ntohs(list[i]);
3307 		}
3308 	}
3309 	SCTP_BUF_LEN(m_notify) = len;
3310 	SCTP_BUF_NEXT(m_notify) = NULL;
3311 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3312 		/* no space */
3313 		sctp_m_freem(m_notify);
3314 		return;
3315 	}
3316 	/* append to socket */
3317 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3318 	    0, 0, stcb->asoc.context, 0, 0, 0,
3319 	    m_notify);
3320 	if (control == NULL) {
3321 		/* no memory */
3322 		sctp_m_freem(m_notify);
3323 		return;
3324 	}
3325 	control->spec_flags = M_NOTIFICATION;
3326 	control->length = SCTP_BUF_LEN(m_notify);
3327 	/* not that we need this */
3328 	control->tail_mbuf = m_notify;
3329 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3330 	    control,
3331 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3332 }
3333 
3334 
/*
 * Central dispatcher: maps an internal SCTP_NOTIFY_* code to the
 * specific socket-notification builder.  'data' is a per-notification
 * payload (a sctp_nets, a chunk, a stream-queue entry, a stream list,
 * ...) and 'error' doubles as an error/cause code or, for the
 * stream-reset add notifications, as the number of entries.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* receive side shut down; nothing can be delivered */
		return;
	}
	/* NOTE(review): stcb is already known non-NULL here */
	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is reported at most once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_DG_FAIL:
		sctp_notify_send_failed(stcb, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	/* NOTE: constant name is misspelled ("DELVIERY") where defined */
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			/* stream in upper 16 bits, sequence in lower 16 */
			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_STRDATA_ERR:
		break;
	case SCTP_NOTIFY_ASSOC_ABORTED:
		/* NOTE(review): the stcb check is redundant; see guard above */
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, so_locked);
		}
		break;
	case SCTP_NOTIFY_PEER_OPENED_STREAM:
		break;
	case SCTP_NOTIFY_STREAM_OPENED_OK:
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, so_locked);
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_HB_RESP:
		break;
	/* for the stream-reset adds, 'error' is the number of entries */
	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_ADD_OK:
		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
		break;

	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SUCCESS:
		break;
	case SCTP_NOTIFY_ASCONF_FAILED:
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	/* for the AUTH notifications, 'data' carries the key id */
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}
3511 
/*
 * Drain every outbound queue of the association (sent queue, pending
 * send queue, and each per-stream output queue), issuing a DG_FAIL /
 * SPECIAL_SP_FAIL notification to the application for each piece of
 * user data that is discarded.  'holds_lock' indicates whether the
 * caller already holds the TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket is gone, nobody to notify */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->data != NULL) {
			/* release buffer-space accounting before notifying */
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
			/* the notify path may have consumed chk->data */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
			/* the notify path may have consumed chk->data */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
				/* the notify path may have consumed sp->data */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3605 
3606 void
3607 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3608 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3609     SCTP_UNUSED
3610 #endif
3611 )
3612 {
3613 	if (stcb == NULL) {
3614 		return;
3615 	}
3616 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3617 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3618 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3619 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3620 	}
3621 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3622 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3623 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3624 		return;
3625 	}
3626 	/* Tell them we lost the asoc */
3627 	sctp_report_all_outbound(stcb, 1, so_locked);
3628 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3629 }
3630 
/*
 * Abort an association in response to an inbound packet: notify the
 * application (if a TCB exists), send an ABORT to the peer using the
 * peer's verification tag (or 0 for OOTB), then free the association.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* vtag == 0 here means an out-of-the-blue abort */
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: hold a ref, drop the TCB lock to take
		 * the socket lock, then re-take the TCB lock.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* established associations also decrement the gauge */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3674 
3675 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the in/out TSN tracking logs of an association.
 * Only compiled when SCTP_ASOCLOG_OF_TSNS is defined, and the body is
 * additionally gated on NOSIY_PRINTS (otherwise this is a no-op).
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
/* NOTE(review): guard spelled "NOSIY_PRINTS" — confirm whether "NOISY_PRINTS" was intended; as written the body compiles away. */
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* the log is circular: print from the wrap point to the end first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* same circular-buffer walk for the outbound log */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3736 
3737 #endif
3738 
/*
 * Abort an association from the local side: notify the application,
 * send an ABORT chunk (with optional cause in op_err) to the peer,
 * update statistics, and free the association.  With stcb == NULL only
 * pending endpoint cleanup is attempted.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last assoc is gone; finish endpoint teardown */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* established associations also decrement the gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: hold a ref, drop the TCB lock to take the
	 * socket lock, then re-take the TCB lock before freeing.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3801 
/*
 * Handle an out-of-the-blue packet (one that matches no association).
 * Walks the chunk list: for certain chunk types (COOKIE_ECHO,
 * PACKET_DROPPED, ABORT, SHUTDOWN_COMPLETE) no response is sent; a
 * SHUTDOWN_ACK elicits a SHUTDOWN_COMPLETE; anything else falls through
 * to an ABORT reply per RFC 4960 OOTB rules.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			/* last assoc is gone; finish endpoint teardown */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(m, sh, vrf_id, port);
			return;
		default:
			break;
		}
		/* advance to the next 32-bit aligned chunk */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
}
3853 
3854 /*
3855  * check the inbound datagram to make sure there is not an abort inside it,
3856  * if there is return 1, else return 0.
3857  */
3858 int
3859 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3860 {
3861 	struct sctp_chunkhdr *ch;
3862 	struct sctp_init_chunk *init_chk, chunk_buf;
3863 	int offset;
3864 	unsigned int chk_length;
3865 
3866 	offset = iphlen + sizeof(struct sctphdr);
3867 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3868 	    (uint8_t *) & chunk_buf);
3869 	while (ch != NULL) {
3870 		chk_length = ntohs(ch->chunk_length);
3871 		if (chk_length < sizeof(*ch)) {
3872 			/* packet is probably corrupt */
3873 			break;
3874 		}
3875 		/* we seem to be ok, is it an abort? */
3876 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
3877 			/* yep, tell them */
3878 			return (1);
3879 		}
3880 		if (ch->chunk_type == SCTP_INITIATION) {
3881 			/* need to update the Vtag */
3882 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
3883 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
3884 			if (init_chk != NULL) {
3885 				*vtagfill = ntohl(init_chk->init.initiate_tag);
3886 			}
3887 		}
3888 		/* Nope, move to the next chunk */
3889 		offset += SCTP_SIZE32(chk_length);
3890 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3891 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3892 	}
3893 	return (0);
3894 }
3895 
3896 /*
3897  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
3898  * set (i.e. it's 0) so, create this function to compare link local scopes
3899  */
3900 #ifdef INET6
3901 uint32_t
3902 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
3903 {
3904 	struct sockaddr_in6 a, b;
3905 
3906 	/* save copies */
3907 	a = *addr1;
3908 	b = *addr2;
3909 
3910 	if (a.sin6_scope_id == 0)
3911 		if (sa6_recoverscope(&a)) {
3912 			/* can't get scope, so can't match */
3913 			return (0);
3914 		}
3915 	if (b.sin6_scope_id == 0)
3916 		if (sa6_recoverscope(&b)) {
3917 			/* can't get scope, so can't match */
3918 			return (0);
3919 		}
3920 	if (a.sin6_scope_id != b.sin6_scope_id)
3921 		return (0);
3922 
3923 	return (1);
3924 }
3925 
3926 /*
3927  * returns a sockaddr_in6 with embedded scope recovered and removed
3928  */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/*
	 * Return a sockaddr_in6 with the scope id recovered (when it was 0)
	 * or with the embedded scope stripped from the address.  'store' is
	 * caller-provided space used only for the recovery case.
	 */
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				/* recover into the copy so 'addr' stays intact */
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
				/* on failure fall through with the original */
			} else {
				/* else, return the original "to" addr */
				/*
				 * NOTE(review): this clears the embedded scope
				 * in the caller's sockaddr in place, not in
				 * 'store' — verify callers tolerate their
				 * address being modified.
				 */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
3949 
3950 #endif
3951 
3952 /*
3953  * are the two addresses the same?  currently a "scopeless" check returns: 1
3954  * if same, 0 if not
3955  */
3956 int
3957 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
3958 {
3959 
3960 	/* must be valid */
3961 	if (sa1 == NULL || sa2 == NULL)
3962 		return (0);
3963 
3964 	/* must be the same family */
3965 	if (sa1->sa_family != sa2->sa_family)
3966 		return (0);
3967 
3968 	switch (sa1->sa_family) {
3969 #ifdef INET6
3970 	case AF_INET6:
3971 		{
3972 			/* IPv6 addresses */
3973 			struct sockaddr_in6 *sin6_1, *sin6_2;
3974 
3975 			sin6_1 = (struct sockaddr_in6 *)sa1;
3976 			sin6_2 = (struct sockaddr_in6 *)sa2;
3977 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
3978 			    sin6_2));
3979 		}
3980 #endif
3981 #ifdef INET
3982 	case AF_INET:
3983 		{
3984 			/* IPv4 addresses */
3985 			struct sockaddr_in *sin_1, *sin_2;
3986 
3987 			sin_1 = (struct sockaddr_in *)sa1;
3988 			sin_2 = (struct sockaddr_in *)sa2;
3989 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
3990 		}
3991 #endif
3992 	default:
3993 		/* we don't do these... */
3994 		return (0);
3995 	}
3996 }
3997 
3998 void
3999 sctp_print_address(struct sockaddr *sa)
4000 {
4001 #ifdef INET6
4002 	char ip6buf[INET6_ADDRSTRLEN];
4003 
4004 	ip6buf[0] = 0;
4005 #endif
4006 
4007 	switch (sa->sa_family) {
4008 #ifdef INET6
4009 	case AF_INET6:
4010 		{
4011 			struct sockaddr_in6 *sin6;
4012 
4013 			sin6 = (struct sockaddr_in6 *)sa;
4014 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4015 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4016 			    ntohs(sin6->sin6_port),
4017 			    sin6->sin6_scope_id);
4018 			break;
4019 		}
4020 #endif
4021 #ifdef INET
4022 	case AF_INET:
4023 		{
4024 			struct sockaddr_in *sin;
4025 			unsigned char *p;
4026 
4027 			sin = (struct sockaddr_in *)sa;
4028 			p = (unsigned char *)&sin->sin_addr;
4029 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4030 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4031 			break;
4032 		}
4033 #endif
4034 	default:
4035 		SCTP_PRINTF("?\n");
4036 		break;
4037 	}
4038 }
4039 
4040 void
4041 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4042 {
4043 	switch (iph->ip_v) {
4044 #ifdef INET
4045 	case IPVERSION:
4046 		{
4047 			struct sockaddr_in lsa, fsa;
4048 
4049 			bzero(&lsa, sizeof(lsa));
4050 			lsa.sin_len = sizeof(lsa);
4051 			lsa.sin_family = AF_INET;
4052 			lsa.sin_addr = iph->ip_src;
4053 			lsa.sin_port = sh->src_port;
4054 			bzero(&fsa, sizeof(fsa));
4055 			fsa.sin_len = sizeof(fsa);
4056 			fsa.sin_family = AF_INET;
4057 			fsa.sin_addr = iph->ip_dst;
4058 			fsa.sin_port = sh->dest_port;
4059 			SCTP_PRINTF("src: ");
4060 			sctp_print_address((struct sockaddr *)&lsa);
4061 			SCTP_PRINTF("dest: ");
4062 			sctp_print_address((struct sockaddr *)&fsa);
4063 			break;
4064 		}
4065 #endif
4066 #ifdef INET6
4067 	case IPV6_VERSION >> 4:
4068 		{
4069 			struct ip6_hdr *ip6;
4070 			struct sockaddr_in6 lsa6, fsa6;
4071 
4072 			ip6 = (struct ip6_hdr *)iph;
4073 			bzero(&lsa6, sizeof(lsa6));
4074 			lsa6.sin6_len = sizeof(lsa6);
4075 			lsa6.sin6_family = AF_INET6;
4076 			lsa6.sin6_addr = ip6->ip6_src;
4077 			lsa6.sin6_port = sh->src_port;
4078 			bzero(&fsa6, sizeof(fsa6));
4079 			fsa6.sin6_len = sizeof(fsa6);
4080 			fsa6.sin6_family = AF_INET6;
4081 			fsa6.sin6_addr = ip6->ip6_dst;
4082 			fsa6.sin6_port = sh->dest_port;
4083 			SCTP_PRINTF("src: ");
4084 			sctp_print_address((struct sockaddr *)&lsa6);
4085 			SCTP_PRINTF("dest: ");
4086 			sctp_print_address((struct sockaddr *)&fsa6);
4087 			break;
4088 		}
4089 #endif
4090 	default:
4091 		/* TSNH */
4092 		break;
4093 	}
4094 }
4095 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp (peeloff/accept
	 * path).  Socket-buffer accounting is moved from old_so->so_rcv to
	 * new_so->so_rcv as the controls migrate.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;	/* staging queue between the two inps */
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* serialize against concurrent readers of the old socket buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* release sb_cc accounting on the old socket */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge the mbuf chain to the new socket's receive buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4171 
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.  If the endpoint can no longer be read from, the control
	 * and its data are freed instead of being queued.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader side is gone: drop the control and its data */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications are not counted as received user messages */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* walk the chain, pruning empty mbufs and charging the sockbuf */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? Free the empty control. */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		/* caller says this is the complete message */
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* finally, wake up any reader waiting on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/* hold a ref across the lock juggle */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4293 
4294 
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 *
	 * Returns 0 on success, -1 when the append cannot be performed
	 * (no control, control already complete, or no data).
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common error exit: drop the read lock if we took it */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* socket can no longer be read from: quietly succeed */
		SCTP_INP_READ_UNLOCK(inp);
		return 0;
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* prune zero-length mbufs and charge the rest to the sockbuf */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake the reader now that new data is visible */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4440 
4441 
4442 
4443 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4444  *************ALTERNATE ROUTING CODE
4445  */
4446 
4447 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4448  *************ALTERNATE ROUTING CODE
4449  */
4450 
4451 struct mbuf *
4452 sctp_generate_invmanparam(int err)
4453 {
4454 	/* Return a MBUF with a invalid mandatory parameter */
4455 	struct mbuf *m;
4456 
4457 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4458 	if (m) {
4459 		struct sctp_paramhdr *ph;
4460 
4461 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4462 		ph = mtod(m, struct sctp_paramhdr *);
4463 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4464 		ph->param_type = htons(err);
4465 	}
4466 	return (m);
4467 }
4468 
4469 #ifdef SCTP_MBCNT_LOGGING
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	/*
	 * Release the output-queue accounting held by chunk tp1:
	 * chunk count, total queued bytes, and (for TCP-model sockets)
	 * the send-buffer byte count.  MBCNT-logging build of the helper.
	 */
	if (tp1->data == NULL) {
		/* no data, nothing was ever charged */
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero rather than underflowing the unsigned counter */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* only TCP-model / TCP-pool endpoints carry sb_cc accounting here */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			/* same clamp for the socket send buffer */
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4501 
4502 #endif
4503 
4504 int
4505 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4506     int reason, int so_locked
4507 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4508     SCTP_UNUSED
4509 #endif
4510 )
4511 {
4512 	struct sctp_stream_out *strq;
4513 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4514 	struct sctp_stream_queue_pending *sp;
4515 	uint16_t stream = 0, seq = 0;
4516 	uint8_t foundeom = 0;
4517 	int ret_sz = 0;
4518 	int notdone;
4519 	int do_wakeup_routine = 0;
4520 
4521 	stream = tp1->rec.data.stream_number;
4522 	seq = tp1->rec.data.stream_seq;
4523 	do {
4524 		ret_sz += tp1->book_size;
4525 		if (tp1->data != NULL) {
4526 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4527 				sctp_flight_size_decrease(tp1);
4528 				sctp_total_flight_decrease(stcb, tp1);
4529 			}
4530 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4531 			stcb->asoc.peers_rwnd += tp1->send_size;
4532 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4533 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4534 			if (tp1->data) {
4535 				sctp_m_freem(tp1->data);
4536 				tp1->data = NULL;
4537 			}
4538 			do_wakeup_routine = 1;
4539 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4540 				stcb->asoc.sent_queue_cnt_removeable--;
4541 			}
4542 		}
4543 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4544 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4545 		    SCTP_DATA_NOT_FRAG) {
4546 			/* not frag'ed we ae done   */
4547 			notdone = 0;
4548 			foundeom = 1;
4549 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4550 			/* end of frag, we are done */
4551 			notdone = 0;
4552 			foundeom = 1;
4553 		} else {
4554 			/*
4555 			 * Its a begin or middle piece, we must mark all of
4556 			 * it
4557 			 */
4558 			notdone = 1;
4559 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4560 		}
4561 	} while (tp1 && notdone);
4562 	if (foundeom == 0) {
4563 		/*
4564 		 * The multi-part message was scattered across the send and
4565 		 * sent queue.
4566 		 */
4567 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4568 			if ((tp1->rec.data.stream_number != stream) ||
4569 			    (tp1->rec.data.stream_seq != seq)) {
4570 				break;
4571 			}
4572 			/*
4573 			 * save to chk in case we have some on stream out
4574 			 * queue. If so and we have an un-transmitted one we
4575 			 * don't have to fudge the TSN.
4576 			 */
4577 			chk = tp1;
4578 			ret_sz += tp1->book_size;
4579 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4580 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4581 			if (tp1->data) {
4582 				sctp_m_freem(tp1->data);
4583 				tp1->data = NULL;
4584 			}
4585 			/* No flight involved here book the size to 0 */
4586 			tp1->book_size = 0;
4587 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4588 				foundeom = 1;
4589 			}
4590 			do_wakeup_routine = 1;
4591 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4592 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4593 			/*
4594 			 * on to the sent queue so we can wait for it to be
4595 			 * passed by.
4596 			 */
4597 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4598 			    sctp_next);
4599 			stcb->asoc.send_queue_cnt--;
4600 			stcb->asoc.sent_queue_cnt++;
4601 		}
4602 	}
4603 	if (foundeom == 0) {
4604 		/*
4605 		 * Still no eom found. That means there is stuff left on the
4606 		 * stream out queue.. yuck.
4607 		 */
4608 		strq = &stcb->asoc.strmout[stream];
4609 		SCTP_TCB_SEND_LOCK(stcb);
4610 		TAILQ_FOREACH(sp, &strq->outqueue, next) {
4611 			/* FIXME: Shouldn't this be a serial number check? */
4612 			if (sp->strseq > seq) {
4613 				break;
4614 			}
4615 			/* Check if its our SEQ */
4616 			if (sp->strseq == seq) {
4617 				sp->discard_rest = 1;
4618 				/*
4619 				 * We may need to put a chunk on the queue
4620 				 * that holds the TSN that would have been
4621 				 * sent with the LAST bit.
4622 				 */
4623 				if (chk == NULL) {
4624 					/* Yep, we have to */
4625 					sctp_alloc_a_chunk(stcb, chk);
4626 					if (chk == NULL) {
4627 						/*
4628 						 * we are hosed. All we can
4629 						 * do is nothing.. which
4630 						 * will cause an abort if
4631 						 * the peer is paying
4632 						 * attention.
4633 						 */
4634 						goto oh_well;
4635 					}
4636 					memset(chk, 0, sizeof(*chk));
4637 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4638 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4639 					chk->asoc = &stcb->asoc;
4640 					chk->rec.data.stream_seq = sp->strseq;
4641 					chk->rec.data.stream_number = sp->stream;
4642 					chk->rec.data.payloadtype = sp->ppid;
4643 					chk->rec.data.context = sp->context;
4644 					chk->flags = sp->act_flags;
4645 					if (sp->net)
4646 						chk->whoTo = sp->net;
4647 					else
4648 						chk->whoTo = stcb->asoc.primary_destination;
4649 					atomic_add_int(&chk->whoTo->ref_count, 1);
4650 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4651 					stcb->asoc.pr_sctp_cnt++;
4652 					chk->pr_sctp_on = 1;
4653 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4654 					stcb->asoc.sent_queue_cnt++;
4655 					stcb->asoc.pr_sctp_cnt++;
4656 				} else {
4657 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4658 				}
4659 		oh_well:
4660 				if (sp->data) {
4661 					/*
4662 					 * Pull any data to free up the SB
4663 					 * and allow sender to "add more"
4664 					 * whilc we will throw away :-)
4665 					 */
4666 					sctp_free_spbufspace(stcb, &stcb->asoc,
4667 					    sp);
4668 					ret_sz += sp->length;
4669 					do_wakeup_routine = 1;
4670 					sp->some_taken = 1;
4671 					sctp_m_freem(sp->data);
4672 					sp->length = 0;
4673 					sp->data = NULL;
4674 					sp->tail_mbuf = NULL;
4675 				}
4676 				break;
4677 			}
4678 		}		/* End tailq_foreach */
4679 		SCTP_TCB_SEND_UNLOCK(stcb);
4680 	}
4681 	if (do_wakeup_routine) {
4682 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4683 		struct socket *so;
4684 
4685 		so = SCTP_INP_SO(stcb->sctp_ep);
4686 		if (!so_locked) {
4687 			atomic_add_int(&stcb->asoc.refcnt, 1);
4688 			SCTP_TCB_UNLOCK(stcb);
4689 			SCTP_SOCKET_LOCK(so, 1);
4690 			SCTP_TCB_LOCK(stcb);
4691 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4692 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4693 				/* assoc was freed while we were unlocked */
4694 				SCTP_SOCKET_UNLOCK(so, 1);
4695 				return (ret_sz);
4696 			}
4697 		}
4698 #endif
4699 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4700 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4701 		if (!so_locked) {
4702 			SCTP_SOCKET_UNLOCK(so, 1);
4703 		}
4704 #endif
4705 	}
4706 	return (ret_sz);
4707 }
4708 
4709 /*
4710  * checks to see if the given address, sa, is one that is currently known by
4711  * the kernel note: can't distinguish the same address on multiple interfaces
4712  * and doesn't handle multiple addresses with different zone/scope id's note:
4713  * ifa_ifwithaddr() compares the entire sockaddr struct
4714  */
4715 struct sctp_ifa *
4716 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4717     int holds_lock)
4718 {
4719 	struct sctp_laddr *laddr;
4720 
4721 	if (holds_lock == 0) {
4722 		SCTP_INP_RLOCK(inp);
4723 	}
4724 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4725 		if (laddr->ifa == NULL)
4726 			continue;
4727 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4728 			continue;
4729 #ifdef INET
4730 		if (addr->sa_family == AF_INET) {
4731 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4732 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4733 				/* found him. */
4734 				if (holds_lock == 0) {
4735 					SCTP_INP_RUNLOCK(inp);
4736 				}
4737 				return (laddr->ifa);
4738 				break;
4739 			}
4740 		}
4741 #endif
4742 #ifdef INET6
4743 		if (addr->sa_family == AF_INET6) {
4744 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4745 			    &laddr->ifa->address.sin6)) {
4746 				/* found him. */
4747 				if (holds_lock == 0) {
4748 					SCTP_INP_RUNLOCK(inp);
4749 				}
4750 				return (laddr->ifa);
4751 				break;
4752 			}
4753 		}
4754 #endif
4755 	}
4756 	if (holds_lock == 0) {
4757 		SCTP_INP_RUNLOCK(inp);
4758 	}
4759 	return (NULL);
4760 }
4761 
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	/*
	 * Compute the address-hash value used to pick a bucket in the VRF
	 * address hash table.  Unhashed families return 0 (bucket 0).
	 */
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			/* fold the high half of the address into the low half */
			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		/*
		 * Fix: this read "case INET6:" — the kernel option macro,
		 * not the address family — so IPv6 addresses fell through
		 * to the default case and always hashed to 0.
		 */
		{
			struct sockaddr_in6 *sin6;
			uint32_t hash_of_addr;

			sin6 = (struct sockaddr_in6 *)addr;
			/* sum the four 32-bit words, then fold the top half in */
			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3]);
			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
			return (hash_of_addr);
		}
#endif
	default:
		break;
	}
	return (0);
}
4795 
4796 struct sctp_ifa *
4797 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4798 {
4799 	struct sctp_ifa *sctp_ifap;
4800 	struct sctp_vrf *vrf;
4801 	struct sctp_ifalist *hash_head;
4802 	uint32_t hash_of_addr;
4803 
4804 	if (holds_lock == 0)
4805 		SCTP_IPI_ADDR_RLOCK();
4806 
4807 	vrf = sctp_find_vrf(vrf_id);
4808 	if (vrf == NULL) {
4809 stage_right:
4810 		if (holds_lock == 0)
4811 			SCTP_IPI_ADDR_RUNLOCK();
4812 		return (NULL);
4813 	}
4814 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4815 
4816 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4817 	if (hash_head == NULL) {
4818 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4819 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4820 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4821 		sctp_print_address(addr);
4822 		SCTP_PRINTF("No such bucket for address\n");
4823 		if (holds_lock == 0)
4824 			SCTP_IPI_ADDR_RUNLOCK();
4825 
4826 		return (NULL);
4827 	}
4828 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4829 		if (sctp_ifap == NULL) {
4830 #ifdef INVARIANTS
4831 			panic("Huh LIST_FOREACH corrupt");
4832 			goto stage_right;
4833 #else
4834 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4835 			goto stage_right;
4836 #endif
4837 		}
4838 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4839 			continue;
4840 #ifdef INET
4841 		if (addr->sa_family == AF_INET) {
4842 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4843 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4844 				/* found him. */
4845 				if (holds_lock == 0)
4846 					SCTP_IPI_ADDR_RUNLOCK();
4847 				return (sctp_ifap);
4848 				break;
4849 			}
4850 		}
4851 #endif
4852 #ifdef INET6
4853 		if (addr->sa_family == AF_INET6) {
4854 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4855 			    &sctp_ifap->address.sin6)) {
4856 				/* found him. */
4857 				if (holds_lock == 0)
4858 					SCTP_IPI_ADDR_RUNLOCK();
4859 				return (sctp_ifap);
4860 				break;
4861 			}
4862 		}
4863 #endif
4864 	}
4865 	if (holds_lock == 0)
4866 		SCTP_IPI_ADDR_RUNLOCK();
4867 	return (NULL);
4868 }
4869 
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/*
	 * User pulled some data, do we need a rwnd update?  If the window
	 * grew by at least rwnd_req bytes, send a window-update SACK and
	 * kick output; otherwise just remember the pending amount.
	 */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* pin the assoc while we poke at it */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* accumulate what the caller freed, then reset their counter */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window grew enough to be worth reporting to the peer */
		if (hold_rlock) {
			/* drop the read lock before taking the TCB lock */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* re-take the read lock if we dropped it above */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
4951 
/*
 * sctp_sorecvmsg() - protocol-specific receive path for SCTP sockets.
 *
 * Pulls queued messages/notifications off the endpoint's read_queue and
 * either copies the data to userland through 'uio' (mp == NULL) or hands
 * the raw mbuf chain back via '*mp'.  Optionally fills 'from' with the
 * peer address and 'sinfo' with per-message sctp_sndrcvinfo data
 * (when 'filling_sinfo' is set).  As data is consumed it accounts freed
 * receive-buffer space and, past the rwnd_req threshold, calls
 * sctp_user_rcvd() to trigger window-update SACKs.
 *
 * Returns 0 or an errno; '*msg_flags' may come back with MSG_EOR,
 * MSG_NOTIFICATION and/or MSG_TRUNC set.
 */
int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
	 * On the way out we may send out any combination of:
	 * MSG_NOTIFICATION MSG_EOR
	 *
	 */
	struct sctp_inpcb *inp = NULL;
	int my_len = 0;
	int cp_len = 0, error = 0;
	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
	struct mbuf *m = NULL;
	struct sctp_tcb *stcb = NULL;
	int wakeup_read_socket = 0;
	int freecnt_applied = 0;
	int out_flags = 0, in_flags = 0;
	int block_allowed = 1;
	uint32_t freed_so_far = 0;
	uint32_t copied_so_far = 0;
	int in_eeor_mode = 0;
	int no_rcv_needed = 0;
	uint32_t rwnd_req = 0;
	int hold_sblock = 0;
	int hold_rlock = 0;
	int slen = 0;
	uint32_t held_length = 0;
	int sockbuf_lock = 0;

	/*
	 * Lock/ref state trackers used throughout:
	 *   sockbuf_lock    - we hold the sblock() on so->so_rcv
	 *   hold_sblock     - we hold SOCKBUF_LOCK(&so->so_rcv)
	 *   hold_rlock      - we hold SCTP_INP_READ_LOCK(inp)
	 *   freecnt_applied - we hold a refcnt on 'stcb' that must be
	 *                     dropped before returning
	 */
	if (uio == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if (msg_flags) {
		in_flags = *msg_flags;
		if (in_flags & MSG_PEEK)
			SCTP_STAT_INCR(sctps_read_peeks);
	} else {
		in_flags = 0;
	}
	slen = uio->uio_resid;

	/* Pull in and set up our int flags */
	if (in_flags & MSG_OOB) {
		/* Out of band's NOT supported */
		return (EOPNOTSUPP);
	}
	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((in_flags & (MSG_DONTWAIT
	    | MSG_NBIO
	    )) ||
	    SCTP_SO_IS_NBIO(so)) {
		block_allowed = 0;
	}
	/* setup the endpoint */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
		return (EFAULT);
	}
	/*
	 * Window-update threshold: a fraction of the receive buffer limit
	 * (1 >> SCTP_RWND_HIWAT_SHIFT of it), floored at SCTP_MIN_RWND.
	 */
	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
	/* Must be at least a MTU's worth */
	if (rwnd_req < SCTP_MIN_RWND)
		rwnd_req = SCTP_MIN_RWND;
	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTER,
		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTERPL,
		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
	}
	/* Serialize readers on this socket buffer. */
	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
	sockbuf_lock = 1;
	if (error) {
		goto release_unlocked;
	}
restart:


restart_nosblocks:
	/*
	 * (Re)take the socket-buffer lock, re-validate socket state and
	 * either find something to read or sleep in sbwait().
	 */
	if (hold_sblock == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		hold_sblock = 1;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto out;
		} else {
			if (so->so_rcv.sb_cc == 0) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
				/* indicate EOF */
				error = 0;
				goto out;
			}
		}
	}
	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
		/* we need to wait for data */
		if ((so->so_rcv.sb_cc == 0) &&
		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/*
				 * For active open side clear flags for
				 * re-use passive open is blocked by
				 * connect.
				 */
				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
					/*
					 * You were aborted, passive side
					 * always hits here
					 */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
					error = ECONNRESET;
				}
				so->so_state &= ~(SS_ISCONNECTING |
				    SS_ISDISCONNECTING |
				    SS_ISCONFIRMING |
				    SS_ISCONNECTED);
				if (error == 0) {
					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
						error = ENOTCONN;
					}
				}
				goto out;
			}
		}
		error = sbwait(&so->so_rcv);
		if (error) {
			goto out;
		}
		held_length = 0;
		goto restart_nosblocks;
	} else if (so->so_rcv.sb_cc == 0) {
		/* Nothing queued and we may not block. */
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
		} else {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
					/*
					 * For active open side clear flags
					 * for re-use passive open is
					 * blocked by connect.
					 */
					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
						/*
						 * You were aborted, passive
						 * side always hits here
						 */
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
						error = ECONNRESET;
					}
					so->so_state &= ~(SS_ISCONNECTING |
					    SS_ISDISCONNECTING |
					    SS_ISCONFIRMING |
					    SS_ISCONNECTED);
					if (error == 0) {
						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
							error = ENOTCONN;
						}
					}
					goto out;
				}
			}
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
			error = EWOULDBLOCK;
		}
		goto out;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* we possibly have data we can read */
	/* sa_ignore FREED_MEMORY */
	control = TAILQ_FIRST(&inp->read_queue);
	if (control == NULL) {
		/*
		 * This could be happening since the appender did the
		 * increment but as not yet did the tailq insert onto the
		 * read_queue
		 */
		if (hold_rlock == 0) {
			SCTP_INP_READ_LOCK(inp);
			hold_rlock = 1;
		}
		control = TAILQ_FIRST(&inp->read_queue);
		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
#ifdef INVARIANTS
			panic("Huh, its non zero and nothing on control?");
#endif
			so->so_rcv.sb_cc = 0;
		}
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->do_not_ref_stcb)) {
		/*
		 * Clean up code for freeing assoc that left behind a
		 * pdapi.. maybe a peer in EEOR that just closed after
		 * sending and never indicated a EOR.
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		control->held_length = 0;
		if (control->data) {
			/* Hmm there is data here .. fix */
			struct mbuf *m_tmp;
			int cnt = 0;

			/* Recompute length from the chain and mark it ended. */
			m_tmp = control->data;
			while (m_tmp) {
				cnt += SCTP_BUF_LEN(m_tmp);
				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
					control->tail_mbuf = m_tmp;
					control->end_added = 1;
				}
				m_tmp = SCTP_BUF_NEXT(m_tmp);
			}
			control->length = cnt;
		} else {
			/* remove it */
			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hiddend data */
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->end_added == 1)) {
		/*
		 * Do we also need to check for (control->pdapi_aborted ==
		 * 1)?
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		TAILQ_REMOVE(&inp->read_queue, control, next);
		if (control->data) {
#ifdef INVARIANTS
			panic("control->data not null but control->length == 0");
#else
			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
			sctp_m_freem(control->data);
			control->data = NULL;
#endif
		}
		if (control->aux_data) {
			sctp_m_free(control->aux_data);
			control->aux_data = NULL;
		}
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if (control->length == 0) {
		/*
		 * Head of queue has no data yet (pd-api in progress).  With
		 * fragment interleave on, look further down the queue for a
		 * deliverable message before going back to sleep.
		 */
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
		    (filling_sinfo)) {
			/* find a more suitable one then this */
			ctl = TAILQ_NEXT(control, next);
			while (ctl) {
				if ((ctl->stcb != control->stcb) && (ctl->length) &&
				    (ctl->some_taken ||
				    (ctl->spec_flags & M_NOTIFICATION) ||
				    ((ctl->do_not_ref_stcb == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present. If we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery as started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered..
					 */
					control = ctl;
					goto found_one;
				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
					    (ctl->length) &&
					    ((ctl->some_taken) ||
					    ((ctl->do_not_ref_stcb == 0) &&
					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to tht tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notificaiton on the same assoc to
					 * be delivered.
					 */
					control = ctl;
					goto found_one;
				}
				ctl = TAILQ_NEXT(ctl, next);
			}
		}
		/*
		 * if we reach here, not suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into the our held count, and its time to sleep again.
		 */
		held_length = so->so_rcv.sb_cc;
		control->held_length = so->so_rcv.sb_cc;
		goto restart;
	}
	/* Clear the held length since there is something to read */
	control->held_length = 0;
	if (hold_rlock) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
found_one:
	/*
	 * If we reach here, control has a some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
	control->some_taken++;
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/*
	 * Pin the association (refcnt) unless the control says not to
	 * reference it or it is already being torn down.
	 */
	stcb = control->stcb;
	if (stcb) {
		if ((control->do_not_ref_stcb == 0) &&
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			if (freecnt_applied == 0)
				stcb = NULL;
		} else if (control->do_not_ref_stcb == 0) {
			/* you can't free it on me please */
			/*
			 * The lock on the socket buffer protects us so the
			 * free code will stop. But since we used the
			 * socketbuf lock and the sender uses the tcb_lock
			 * to increment, we need to use the atomic add to
			 * the refcnt
			 */
			if (freecnt_applied) {
#ifdef INVARIANTS
				panic("refcnt already incremented");
#else
				printf("refcnt already incremented?\n");
#endif
			} else {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				freecnt_applied = 1;
			}
			/*
			 * Setup to remember how much we have not yet told
			 * the peer our rwnd has opened up. Note we grab the
			 * value from the tcb from last time. Note too that
			 * sack sending clears this when a sack is sent,
			 * which is fine. Once we hit the rwnd_req, we then
			 * will go to the sctp_user_rcvd() that will not
			 * lock until it KNOWs it MUST send a WUP-SACK.
			 */
			freed_so_far = stcb->freed_by_sorcv_sincelast;
			stcb->freed_by_sorcv_sincelast = 0;
		}
	}
	if (stcb &&
	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
	    control->do_not_ref_stcb == 0) {
		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
	}
	/* First lets get off the sinfo and sockaddr info */
	if ((sinfo) && filling_sinfo) {
		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
		nxt = TAILQ_NEXT(control, next);
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
			struct sctp_extrcvinfo *s_extra;

			/* Describe the NEXT queued message, if any. */
			s_extra = (struct sctp_extrcvinfo *)sinfo;
			if ((nxt) &&
			    (nxt->length)) {
				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
				if (nxt->sinfo_flags & SCTP_UNORDERED) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
				}
				if (nxt->spec_flags & M_NOTIFICATION) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
				}
				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
				s_extra->sreinfo_next_length = nxt->length;
				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
				if (nxt->tail_mbuf != NULL) {
					if (nxt->end_added) {
						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
					}
				}
			} else {
				/*
				 * we explicitly 0 this, since the memcpy
				 * got some other things beyond the older
				 * sinfo_ that is on the control's structure
				 * :-D
				 */
				nxt = NULL;
				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
				s_extra->sreinfo_next_aid = 0;
				s_extra->sreinfo_next_length = 0;
				s_extra->sreinfo_next_ppid = 0;
				s_extra->sreinfo_next_stream = 0;
			}
		}
		/*
		 * update off the real current cum-ack, if we have an stcb.
		 */
		if ((control->do_not_ref_stcb == 0) && stcb)
			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
		/*
		 * mask off the high bits, we keep the actual chunk bits in
		 * there.
		 */
		sinfo->sinfo_flags &= 0x00ff;
		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
			sinfo->sinfo_flags |= SCTP_UNORDERED;
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	{
		int index, newindex;
		struct sctp_pcbtsn_rlog *entry;

		/* Lock-free slot claim in the circular read log. */
		do {
			index = inp->readlog_index;
			newindex = index + 1;
			if (newindex >= SCTP_READ_LOG_SIZE) {
				newindex = 0;
			}
		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
		entry = &inp->readlog[index];
		entry->vtag = control->sinfo_assoc_id;
		entry->strm = control->sinfo_stream;
		entry->seq = control->sinfo_ssn;
		entry->sz = control->length;
		entry->flgs = control->sinfo_flags;
	}
#endif
	if (fromlen && from) {
		/* Copy out the peer address, truncated to fromlen. */
		struct sockaddr *to;

#ifdef INET
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in *)from)->sin_port = control->port_from;
#else
		/* No AF_INET use AF_INET6 */
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
#endif

		to = from;
#if defined(INET) && defined(INET6)
		/* If requested, present an IPv4 peer as a v4-mapped IPv6 addr. */
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
		    (to->sa_family == AF_INET) &&
		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
			struct sockaddr_in *sin;
			struct sockaddr_in6 sin6;

			sin = (struct sockaddr_in *)to;
			bzero(&sin6, sizeof(sin6));
			sin6.sin6_family = AF_INET6;
			sin6.sin6_len = sizeof(struct sockaddr_in6);
			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
			bcopy(&sin->sin_addr,
			    &sin6.sin6_addr.s6_addr32[3],
			    sizeof(sin6.sin6_addr.s6_addr32[3]));
			sin6.sin6_port = sin->sin_port;
			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
		}
#endif
#if defined(INET6)
		{
			/* NOTE(review): presumably restores embedded scope id
			 * for link-local v6 addresses - confirm against
			 * sctp_recover_scope_mac(). */
			struct sockaddr_in6 lsa6, *to6;

			to6 = (struct sockaddr_in6 *)to;
			sctp_recover_scope_mac(to6, (&lsa6));
		}
#endif
	}
	/* now copy out what data we can */
	if (mp == NULL) {
		/* copy out each mbuf in the chain up to length */
get_more_data:
		/*
		 * uio copy path: walk the control's mbuf chain, moving data
		 * to userland and (unless MSG_PEEK) freeing consumed mbufs
		 * and accounting the freed space.
		 */
		m = control->data;
		while (m) {
			/* Move out all we can */
			cp_len = (int)uio->uio_resid;
			my_len = (int)SCTP_BUF_LEN(m);
			if (cp_len > my_len) {
				/* not enough in this buf */
				cp_len = my_len;
			}
			if (hold_rlock) {
				SCTP_INP_READ_UNLOCK(inp);
				hold_rlock = 0;
			}
			if (cp_len > 0)
				error = uiomove(mtod(m, char *), cp_len, uio);
			/* re-read */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				goto release;
			}
			if ((control->do_not_ref_stcb == 0) && stcb &&
			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				no_rcv_needed = 1;
			}
			if (error) {
				/* error we are out of here */
				goto release;
			}
			if ((SCTP_BUF_NEXT(m) == NULL) &&
			    (cp_len >= SCTP_BUF_LEN(m)) &&
			    ((control->end_added == 0) ||
			    (control->end_added &&
			    (TAILQ_NEXT(control, next) == NULL)))
			    ) {
				/* About to consume the last mbuf of an
				 * unfinished message: need the read lock so
				 * the appender cannot race us. */
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
			if (cp_len == SCTP_BUF_LEN(m)) {
				if ((SCTP_BUF_NEXT(m) == NULL) &&
				    (control->end_added)) {
					out_flags |= MSG_EOR;
					if ((control->do_not_ref_stcb == 0) &&
					    (control->stcb != NULL) &&
					    ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				/* we ate up the mbuf */
				if (in_flags & MSG_PEEK) {
					/* just looking */
					m = SCTP_BUF_NEXT(m);
					copied_so_far += cp_len;
				} else {
					/* dispose of the mbuf */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
					}
					sctp_sbfree(control, stcb, &so->so_rcv, m);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
					}
					copied_so_far += cp_len;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					atomic_subtract_int(&control->length, cp_len);
					control->data = sctp_m_free(m);
					m = control->data;
					/*
					 * been through it all, must hold sb
					 * lock ok to null tail
					 */
					if (control->data == NULL) {
#ifdef INVARIANTS
						if ((control->end_added == 0) ||
						    (TAILQ_NEXT(control, next) == NULL)) {
							/*
							 * If the end is not
							 * added, OR the
							 * next is NOT null
							 * we MUST have the
							 * lock.
							 */
							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
								panic("Hmm we don't own the lock?");
							}
						}
#endif
						control->tail_mbuf = NULL;
#ifdef INVARIANTS
						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
							panic("end_added, nothing left and no MSG_EOR");
						}
#endif
					}
				}
			} else {
				/* Do we need to trim the mbuf? */
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				if ((in_flags & MSG_PEEK) == 0) {
					/* Partial mbuf consumed: advance its
					 * data pointer and shrink its length. */
					SCTP_BUF_RESV_UF(m, cp_len);
					SCTP_BUF_LEN(m) -= cp_len;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
					}
					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
					if ((control->do_not_ref_stcb == 0) &&
					    stcb) {
						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
					}
					copied_so_far += cp_len;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
						    SCTP_LOG_SBRESULT, 0);
					}
					atomic_subtract_int(&control->length, cp_len);
				} else {
					copied_so_far += cp_len;
				}
			}
			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
				break;
			}
			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
			    (control->do_not_ref_stcb == 0) &&
			    (freed_so_far >= rwnd_req)) {
				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
			}
		}		/* end while(m) */
		/*
		 * At this point we have looked at it all and we either have
		 * a MSG_EOR/or read all the user wants... <OR>
		 * control->length == 0.
		 */
		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
			/* we are done with this control */
			if (control->length == 0) {
				if (control->data) {
#ifdef INVARIANTS
					panic("control->data not null at read eor?");
#else
					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
					sctp_m_freem(control->data);
					control->data = NULL;
#endif
				}
		done_with_control:
				/*
				 * Fully consumed message: unlink the control
				 * from the read_queue and release it.
				 */
				if (TAILQ_NEXT(control, next) == NULL) {
					/*
					 * If we don't have a next we need a
					 * lock, if there is a next
					 * interrupt is filling ahead of us
					 * and we don't need a lock to
					 * remove this guy (which is the
					 * head of the queue).
					 */
					if (hold_rlock == 0) {
						SCTP_INP_READ_LOCK(inp);
						hold_rlock = 1;
					}
				}
				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hiddend data */
				if (control->held_length) {
					held_length = 0;
					control->held_length = 0;
					wakeup_read_socket = 1;
				}
				if (control->aux_data) {
					sctp_m_free(control->aux_data);
					control->aux_data = NULL;
				}
				no_rcv_needed = control->do_not_ref_stcb;
				sctp_free_remote_addr(control->whoFrom);
				control->data = NULL;
				sctp_free_a_readq(stcb, control);
				control = NULL;
				if ((freed_so_far >= rwnd_req) &&
				    (no_rcv_needed == 0))
					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);

			} else {
				/*
				 * The user did not read all of this
				 * message, turn off the returned MSG_EOR
				 * since we are leaving more behind on the
				 * control to read.
				 */
#ifdef INVARIANTS
				if (control->end_added &&
				    (control->data == NULL) &&
				    (control->tail_mbuf == NULL)) {
					panic("Gak, control->length is corrupt?");
				}
#endif
				no_rcv_needed = control->do_not_ref_stcb;
				out_flags &= ~MSG_EOR;
			}
		}
		if (out_flags & MSG_EOR) {
			goto release;
		}
		if ((uio->uio_resid == 0) ||
		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
		    ) {
			goto release;
		}
		/*
		 * If I hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions. Can we block? if not
		 * we are done. Did the user NOT set MSG_WAITALL?
		 */
		if (block_allowed == 0) {
			goto release;
		}
		/*
		 * We need to wait for more data a few things: - We don't
		 * sbunlock() so we don't get someone else reading. - We
		 * must be sure to account for the case where what is added
		 * is NOT to our control when we wakeup.
		 */

		/*
		 * Do we need to tell the transport a rwnd update might be
		 * needed before we go to sleep?
		 */
		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
		    ((freed_so_far >= rwnd_req) &&
		    (control->do_not_ref_stcb == 0) &&
		    (no_rcv_needed == 0))) {
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
		}
wait_some_more:
		/*
		 * Sleep until more data is appended to THIS control (pd-api
		 * continuation), rechecking socket teardown on each wakeup.
		 */
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			goto release;
		}
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
			goto release;

		if (hold_rlock == 1) {
			SCTP_INP_READ_UNLOCK(inp);
			hold_rlock = 0;
		}
		if (hold_sblock == 0) {
			SOCKBUF_LOCK(&so->so_rcv);
			hold_sblock = 1;
		}
		if ((copied_so_far) && (control->length == 0) &&
		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
			goto release;
		}
		if (so->so_rcv.sb_cc <= control->held_length) {
			error = sbwait(&so->so_rcv);
			if (error) {
				goto release;
			}
			control->held_length = 0;
		}
		if (hold_sblock) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			hold_sblock = 0;
		}
		if (control->length == 0) {
			/* still nothing here */
			if (control->end_added == 1) {
				/* he aborted, or is done i.e.did a shutdown */
				out_flags |= MSG_EOR;
				if (control->pdapi_aborted) {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;

					out_flags |= MSG_TRUNC;
				} else {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				goto done_with_control;
			}
			if (so->so_rcv.sb_cc > held_length) {
				control->held_length = so->so_rcv.sb_cc;
				held_length = 0;
			}
			goto wait_some_more;
		} else if (control->data == NULL) {
			/*
			 * we must re-sync since data is probably being
			 * added
			 */
			SCTP_INP_READ_LOCK(inp);
			if ((control->length > 0) && (control->data == NULL)) {
				/*
				 * big trouble.. we have the lock and its
				 * corrupt?
				 */
#ifdef INVARIANTS
				panic("Impossible data==NULL length !=0");
#endif
				out_flags |= MSG_EOR;
				out_flags |= MSG_TRUNC;
				control->length = 0;
				SCTP_INP_READ_UNLOCK(inp);
				goto done_with_control;
			}
			SCTP_INP_READ_UNLOCK(inp);
			/* We will fall around to get more data */
		}
		goto get_more_data;
	} else {
		/*-
		 * Give caller back the mbuf chain,
		 * store in uio_resid the length
		 */
		wakeup_read_socket = 0;
		if ((control->end_added == 0) ||
		    (TAILQ_NEXT(control, next) == NULL)) {
			/* Need to get rlock */
			if (hold_rlock == 0) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
		}
		if (control->end_added) {
			out_flags |= MSG_EOR;
			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
		}
		if (control->spec_flags & M_NOTIFICATION) {
			out_flags |= MSG_NOTIFICATION;
		}
		uio->uio_resid = control->length;
		*mp = control->data;
		m = control->data;
		while (m) {
			/* Account the whole chain out of the socket buffer;
			 * ownership of the mbufs passes to the caller. */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
			}
			sctp_sbfree(control, stcb, &so->so_rcv, m);
			freed_so_far += SCTP_BUF_LEN(m);
			freed_so_far += MSIZE;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control->data = control->tail_mbuf = NULL;
		control->length = 0;
		if (out_flags & MSG_EOR) {
			/* Done with this control */
			goto done_with_control;
		}
	}
release:
	/* Normal exit: drop any fine-grained locks, then the reader sblock. */
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	sbunlock(&so->so_rcv);
	sockbuf_lock = 0;

release_unlocked:
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
		if ((freed_so_far >= rwnd_req) &&
		    (control && (control->do_not_ref_stcb == 0)) &&
		    (no_rcv_needed == 0))
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
	}
out:
	/* Common exit: publish flags, drop remaining locks and the stcb ref. */
	if (msg_flags) {
		*msg_flags = out_flags;
	}
	if (((out_flags & MSG_EOR) == 0) &&
	    ((in_flags & MSG_PEEK) == 0) &&
	    (sinfo) &&
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
		struct sctp_extrcvinfo *s_extra;

		s_extra = (struct sctp_extrcvinfo *)sinfo;
		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
	}
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if (sockbuf_lock) {
		sbunlock(&so->so_rcv);
	}
	if (freecnt_applied) {
		/*
		 * The lock on the socket buffer protects us so the free
		 * code will stop. But since we used the socketbuf lock and
		 * the sender uses the tcb_lock to increment, we need to use
		 * the atomic add to the refcnt.
		 */
		if (stcb == NULL) {
#ifdef INVARIANTS
			panic("stcb for refcnt has gone NULL?");
			goto stage_left;
#else
			goto stage_left;
#endif
		}
		atomic_add_int(&stcb->asoc.refcnt, -1);
		freecnt_applied = 0;
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
stage_left:
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}
5928 
5929 
5930 #ifdef SCTP_MBUF_LOGGING
5931 struct mbuf *
5932 sctp_m_free(struct mbuf *m)
5933 {
5934 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5935 		if (SCTP_BUF_IS_EXTENDED(m)) {
5936 			sctp_log_mb(m, SCTP_MBUF_IFREE);
5937 		}
5938 	}
5939 	return (m_free(m));
5940 }
5941 
5942 void
5943 sctp_m_freem(struct mbuf *mb)
5944 {
5945 	while (mb != NULL)
5946 		mb = sctp_m_free(mb);
5947 }
5948 
5949 #endif
5950 
5951 int
5952 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
5953 {
5954 	/*
5955 	 * Given a local address. For all associations that holds the
5956 	 * address, request a peer-set-primary.
5957 	 */
5958 	struct sctp_ifa *ifa;
5959 	struct sctp_laddr *wi;
5960 
5961 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
5962 	if (ifa == NULL) {
5963 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
5964 		return (EADDRNOTAVAIL);
5965 	}
5966 	/*
5967 	 * Now that we have the ifa we must awaken the iterator with this
5968 	 * message.
5969 	 */
5970 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
5971 	if (wi == NULL) {
5972 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
5973 		return (ENOMEM);
5974 	}
5975 	/* Now incr the count and int wi structure */
5976 	SCTP_INCR_LADDR_COUNT();
5977 	bzero(wi, sizeof(*wi));
5978 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
5979 	wi->ifa = ifa;
5980 	wi->action = SCTP_SET_PRIM_ADDR;
5981 	atomic_add_int(&ifa->refcount, 1);
5982 
5983 	/* Now add it to the work queue */
5984 	SCTP_WQ_ADDR_LOCK();
5985 	/*
5986 	 * Should this really be a tailq? As it is we will process the
5987 	 * newest first :-0
5988 	 */
5989 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
5990 	SCTP_WQ_ADDR_UNLOCK();
5991 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
5992 	    (struct sctp_inpcb *)NULL,
5993 	    (struct sctp_tcb *)NULL,
5994 	    (struct sctp_nets *)NULL);
5995 	return (0);
5996 }
5997 
5998 
5999 int
6000 sctp_soreceive(struct socket *so,
6001     struct sockaddr **psa,
6002     struct uio *uio,
6003     struct mbuf **mp0,
6004     struct mbuf **controlp,
6005     int *flagsp)
6006 {
6007 	int error, fromlen;
6008 	uint8_t sockbuf[256];
6009 	struct sockaddr *from;
6010 	struct sctp_extrcvinfo sinfo;
6011 	int filling_sinfo = 1;
6012 	struct sctp_inpcb *inp;
6013 
6014 	inp = (struct sctp_inpcb *)so->so_pcb;
6015 	/* pickup the assoc we are reading from */
6016 	if (inp == NULL) {
6017 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6018 		return (EINVAL);
6019 	}
6020 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6021 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6022 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6023 	    (controlp == NULL)) {
6024 		/* user does not want the sndrcv ctl */
6025 		filling_sinfo = 0;
6026 	}
6027 	if (psa) {
6028 		from = (struct sockaddr *)sockbuf;
6029 		fromlen = sizeof(sockbuf);
6030 		from->sa_len = 0;
6031 	} else {
6032 		from = NULL;
6033 		fromlen = 0;
6034 	}
6035 
6036 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6037 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6038 	if ((controlp) && (filling_sinfo)) {
6039 		/* copy back the sinfo in a CMSG format */
6040 		if (filling_sinfo)
6041 			*controlp = sctp_build_ctl_nchunk(inp,
6042 			    (struct sctp_sndrcvinfo *)&sinfo);
6043 		else
6044 			*controlp = NULL;
6045 	}
6046 	if (psa) {
6047 		/* copy back the address info */
6048 		if (from && from->sa_len) {
6049 			*psa = sodupsockaddr(from, M_NOWAIT);
6050 		} else {
6051 			*psa = NULL;
6052 		}
6053 	}
6054 	return (error);
6055 }
6056 
6057 
6058 
6059 
6060 
/*
 * sctp_connectx() helper: add each address in the packed list 'addr'
 * (totaddr entries of sockaddr_in/sockaddr_in6 laid out back to back)
 * as a confirmed remote address of the association.  Returns the number
 * of addresses added.  On any failure the association has already been
 * freed, *error is set, and the caller must not touch stcb again.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast peers */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				/* a bad entry tears down the whole assoc */
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast peers */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown family leaves 'incr' at
			 * its previous value (0 for the first entry), so
			 * 'sa' may not advance to the next entry correctly.
			 * Presumably callers pass only AF_INET/AF_INET6
			 * entries here - confirm against the callers.
			 */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6136 
6137 struct sctp_tcb *
6138 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6139     int *totaddr, int *num_v4, int *num_v6, int *error,
6140     int limit, int *bad_addr)
6141 {
6142 	struct sockaddr *sa;
6143 	struct sctp_tcb *stcb = NULL;
6144 	size_t incr, at, i;
6145 
6146 	at = incr = 0;
6147 	sa = addr;
6148 
6149 	*error = *num_v6 = *num_v4 = 0;
6150 	/* account and validate addresses */
6151 	for (i = 0; i < (size_t)*totaddr; i++) {
6152 		switch (sa->sa_family) {
6153 #ifdef INET
6154 		case AF_INET:
6155 			(*num_v4) += 1;
6156 			incr = sizeof(struct sockaddr_in);
6157 			if (sa->sa_len != incr) {
6158 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6159 				*error = EINVAL;
6160 				*bad_addr = 1;
6161 				return (NULL);
6162 			}
6163 			break;
6164 #endif
6165 #ifdef INET6
6166 		case AF_INET6:
6167 			{
6168 				struct sockaddr_in6 *sin6;
6169 
6170 				sin6 = (struct sockaddr_in6 *)sa;
6171 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6172 					/* Must be non-mapped for connectx */
6173 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6174 					*error = EINVAL;
6175 					*bad_addr = 1;
6176 					return (NULL);
6177 				}
6178 				(*num_v6) += 1;
6179 				incr = sizeof(struct sockaddr_in6);
6180 				if (sa->sa_len != incr) {
6181 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6182 					*error = EINVAL;
6183 					*bad_addr = 1;
6184 					return (NULL);
6185 				}
6186 				break;
6187 			}
6188 #endif
6189 		default:
6190 			*totaddr = i;
6191 			/* we are done */
6192 			break;
6193 		}
6194 		if (i == (size_t)*totaddr) {
6195 			break;
6196 		}
6197 		SCTP_INP_INCR_REF(inp);
6198 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6199 		if (stcb != NULL) {
6200 			/* Already have or am bring up an association */
6201 			return (stcb);
6202 		} else {
6203 			SCTP_INP_DECR_REF(inp);
6204 		}
6205 		if ((at + incr) > (size_t)limit) {
6206 			*totaddr = i;
6207 			break;
6208 		}
6209 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6210 	}
6211 	return ((struct sctp_tcb *)NULL);
6212 }
6213 
6214 /*
6215  * sctp_bindx(ADD) for one address.
6216  * assumes all arguments are valid/checked by caller.
6217  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	/* scratch space for converting a v4-mapped v6 address to plain v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* work on the embedded v4 address instead */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* endpoint not bound yet: this becomes an ordinary bind */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* is another endpoint already bound to this address/port? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* address is free: clear the port and add it */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* some other endpoint owns this address/port */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6341 
6342 /*
6343  * sctp_bindx(DELETE) for one address.
6344  * assumes all arguments are valid/checked by caller.
6345  */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	/* scratch space for converting a v4-mapped v6 address to plain v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* delete using the embedded v4 address */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6426 
6427 /*
6428  * returns the valid local address count for an assoc, taking into account
6429  * all scoping rules
6430  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	ipv4_addr_legal = ipv6_addr_legal = 0;
	/* which families may be used depends on how the endpoint is bound */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad
									 * link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		/* only count addresses explicitly bound to the endpoint */
		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6564 
6565 #if defined(SCTP_LOCAL_TRACE_BUF)
6566 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/*
	 * Atomically claim the next slot in the circular trace buffer;
	 * the cmpset loop retries if another CPU raced us to the index.
	 */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* wrap the claimed slot back to the start of the ring */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	/* fill in the claimed entry; the slot is ours, no lock needed */
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6592 
6593 #endif
6594 /* XXX: Remove the #ifdef after tunneling over IPv6 works also on FreeBSD. */
6595 #ifdef INET
6596 /* We will need to add support
6597  * to bind the ports and such here
6598  * so we can do UDP tunneling. In
6599  * the mean-time, we return error
6600  */
6601 #include <netinet/udp.h>
6602 #include <netinet/udp_var.h>
6603 #include <sys/proc.h>
6604 #ifdef INET6
6605 #include <netinet6/sctp6_var.h>
6606 #endif
6607 
6608 static void
6609 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6610 {
6611 	struct ip *iph;
6612 	struct mbuf *sp, *last;
6613 	struct udphdr *uhdr;
6614 	uint16_t port = 0;
6615 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6616 
6617 	/*
6618 	 * Split out the mbuf chain. Leave the IP header in m, place the
6619 	 * rest in the sp.
6620 	 */
6621 	if ((m->m_flags & M_PKTHDR) == 0) {
6622 		/* Can't handle one that is not a pkt hdr */
6623 		goto out;
6624 	}
6625 	/* pull the src port */
6626 	iph = mtod(m, struct ip *);
6627 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6628 
6629 	port = uhdr->uh_sport;
6630 	sp = m_split(m, off, M_DONTWAIT);
6631 	if (sp == NULL) {
6632 		/* Gak, drop packet, we can't do a split */
6633 		goto out;
6634 	}
6635 	if (sp->m_pkthdr.len < header_size) {
6636 		/* Gak, packet can't have an SCTP header in it - to small */
6637 		m_freem(sp);
6638 		goto out;
6639 	}
6640 	/* ok now pull up the UDP header and SCTP header together */
6641 	sp = m_pullup(sp, header_size);
6642 	if (sp == NULL) {
6643 		/* Gak pullup failed */
6644 		goto out;
6645 	}
6646 	/* trim out the UDP header */
6647 	m_adj(sp, sizeof(struct udphdr));
6648 
6649 	/* Now reconstruct the mbuf chain */
6650 	/* 1) find last one */
6651 	last = m;
6652 	while (last->m_next != NULL) {
6653 		last = last->m_next;
6654 	}
6655 	last->m_next = sp;
6656 	m->m_pkthdr.len += sp->m_pkthdr.len;
6657 	last = m;
6658 	while (last != NULL) {
6659 		last = last->m_next;
6660 	}
6661 	/* Now its ready for sctp_input or sctp6_input */
6662 	iph = mtod(m, struct ip *);
6663 	switch (iph->ip_v) {
6664 #ifdef INET
6665 	case IPVERSION:
6666 		{
6667 			uint16_t len;
6668 
6669 			/* its IPv4 */
6670 			len = SCTP_GET_IPV4_LENGTH(iph);
6671 			len -= sizeof(struct udphdr);
6672 			SCTP_GET_IPV4_LENGTH(iph) = len;
6673 			sctp_input_with_port(m, off, port);
6674 			break;
6675 		}
6676 #endif
6677 #ifdef INET6
6678 	case IPV6_VERSION >> 4:
6679 		{
6680 			/* its IPv6 - NOT supported */
6681 			goto out;
6682 			break;
6683 
6684 		}
6685 #endif
6686 	default:
6687 		{
6688 			m_freem(m);
6689 			break;
6690 		}
6691 	}
6692 	return;
6693 out:
6694 	m_freem(m);
6695 }
6696 
6697 void
6698 sctp_over_udp_stop(void)
6699 {
6700 	struct socket *sop;
6701 
6702 	/*
6703 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6704 	 * for writting!
6705 	 */
6706 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6707 		/* Nothing to do */
6708 		return;
6709 	}
6710 	sop = SCTP_BASE_INFO(udp_tun_socket);
6711 	soclose(sop);
6712 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6713 }
6714 
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writting!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	/* create a kernel-owned UDP socket for the tunnel */
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	/* stash it first so sctp_over_udp_stop() can clean it up on error */
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up we cant get the port */
exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_upd_tunneled_packet().
	 */
	return (0);
}
6768 
6769 #endif
6770