xref: /freebsd/sys/netinet/sctputil.c (revision aaf04b7cb637af9400e6b9ae1bd531ef828a7c82)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #endif
45 #include <netinet/sctp_header.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_uio.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_bsd_addr.h>
53 
54 
55 #ifndef KTR_SCTP
56 #define KTR_SCTP KTR_SUBSYS
57 #endif
58 
59 extern struct sctp_cc_functions sctp_cc_functions[];
60 extern struct sctp_ss_functions sctp_ss_functions[];
61 
62 void
63 sctp_sblog(struct sockbuf *sb,
64     struct sctp_tcb *stcb, int from, int incr)
65 {
66 	struct sctp_cwnd_log sctp_clog;
67 
68 	sctp_clog.x.sb.stcb = stcb;
69 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
70 	if (stcb)
71 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
72 	else
73 		sctp_clog.x.sb.stcb_sbcc = 0;
74 	sctp_clog.x.sb.incr = incr;
75 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
76 	    SCTP_LOG_EVENT_SB,
77 	    from,
78 	    sctp_clog.x.misc.log1,
79 	    sctp_clog.x.misc.log2,
80 	    sctp_clog.x.misc.log3,
81 	    sctp_clog.x.misc.log4);
82 }
83 
84 void
85 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
86 {
87 	struct sctp_cwnd_log sctp_clog;
88 
89 	sctp_clog.x.close.inp = (void *)inp;
90 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
91 	if (stcb) {
92 		sctp_clog.x.close.stcb = (void *)stcb;
93 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
94 	} else {
95 		sctp_clog.x.close.stcb = 0;
96 		sctp_clog.x.close.state = 0;
97 	}
98 	sctp_clog.x.close.loc = loc;
99 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
100 	    SCTP_LOG_EVENT_CLOSE,
101 	    0,
102 	    sctp_clog.x.misc.log1,
103 	    sctp_clog.x.misc.log2,
104 	    sctp_clog.x.misc.log3,
105 	    sctp_clog.x.misc.log4);
106 }
107 
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
204     int from)
205 {
206 	struct sctp_cwnd_log sctp_clog;
207 
208 	memset(&sctp_clog, 0, sizeof(sctp_clog));
209 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
210 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
211 	sctp_clog.x.fr.tsn = tsn;
212 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
213 	    SCTP_LOG_EVENT_FR,
214 	    from,
215 	    sctp_clog.x.misc.log1,
216 	    sctp_clog.x.misc.log2,
217 	    sctp_clog.x.misc.log3,
218 	    sctp_clog.x.misc.log4);
219 }
220 
221 void
222 sctp_log_mb(struct mbuf *m, int from)
223 {
224 	struct sctp_cwnd_log sctp_clog;
225 
226 	sctp_clog.x.mb.mp = m;
227 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230 	if (SCTP_BUF_IS_EXTENDED(m)) {
231 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233 	} else {
234 		sctp_clog.x.mb.ext = 0;
235 		sctp_clog.x.mb.refcnt = 0;
236 	}
237 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238 	    SCTP_LOG_EVENT_MBUF,
239 	    from,
240 	    sctp_clog.x.misc.log1,
241 	    sctp_clog.x.misc.log2,
242 	    sctp_clog.x.misc.log3,
243 	    sctp_clog.x.misc.log4);
244 }
245 
246 void
247 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
248     int from)
249 {
250 	struct sctp_cwnd_log sctp_clog;
251 
252 	if (control == NULL) {
253 		SCTP_PRINTF("Gak log of NULL?\n");
254 		return;
255 	}
256 	sctp_clog.x.strlog.stcb = control->stcb;
257 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
258 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
259 	sctp_clog.x.strlog.strm = control->sinfo_stream;
260 	if (poschk != NULL) {
261 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
262 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
263 	} else {
264 		sctp_clog.x.strlog.e_tsn = 0;
265 		sctp_clog.x.strlog.e_sseq = 0;
266 	}
267 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
268 	    SCTP_LOG_EVENT_STRM,
269 	    from,
270 	    sctp_clog.x.misc.log1,
271 	    sctp_clog.x.misc.log2,
272 	    sctp_clog.x.misc.log3,
273 	    sctp_clog.x.misc.log4);
274 }
275 
276 void
277 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
278 {
279 	struct sctp_cwnd_log sctp_clog;
280 
281 	sctp_clog.x.cwnd.net = net;
282 	if (stcb->asoc.send_queue_cnt > 255)
283 		sctp_clog.x.cwnd.cnt_in_send = 255;
284 	else
285 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
286 	if (stcb->asoc.stream_queue_cnt > 255)
287 		sctp_clog.x.cwnd.cnt_in_str = 255;
288 	else
289 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
290 
291 	if (net) {
292 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
293 		sctp_clog.x.cwnd.inflight = net->flight_size;
294 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
295 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
296 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
297 	}
298 	if (SCTP_CWNDLOG_PRESEND == from) {
299 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
300 	}
301 	sctp_clog.x.cwnd.cwnd_augment = augment;
302 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
303 	    SCTP_LOG_EVENT_CWND,
304 	    from,
305 	    sctp_clog.x.misc.log1,
306 	    sctp_clog.x.misc.log2,
307 	    sctp_clog.x.misc.log3,
308 	    sctp_clog.x.misc.log4);
309 }
310 
311 void
312 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
313 {
314 	struct sctp_cwnd_log sctp_clog;
315 
316 	memset(&sctp_clog, 0, sizeof(sctp_clog));
317 	if (inp) {
318 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
319 
320 	} else {
321 		sctp_clog.x.lock.sock = (void *)NULL;
322 	}
323 	sctp_clog.x.lock.inp = (void *)inp;
324 	if (stcb) {
325 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
326 	} else {
327 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
328 	}
329 	if (inp) {
330 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
331 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
332 	} else {
333 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
334 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
335 	}
336 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
337 	if (inp && (inp->sctp_socket)) {
338 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
339 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
340 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
341 	} else {
342 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
343 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
344 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
345 	}
346 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
347 	    SCTP_LOG_LOCK_EVENT,
348 	    from,
349 	    sctp_clog.x.misc.log1,
350 	    sctp_clog.x.misc.log2,
351 	    sctp_clog.x.misc.log3,
352 	    sctp_clog.x.misc.log4);
353 }
354 
355 void
356 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
357 {
358 	struct sctp_cwnd_log sctp_clog;
359 
360 	memset(&sctp_clog, 0, sizeof(sctp_clog));
361 	sctp_clog.x.cwnd.net = net;
362 	sctp_clog.x.cwnd.cwnd_new_value = error;
363 	sctp_clog.x.cwnd.inflight = net->flight_size;
364 	sctp_clog.x.cwnd.cwnd_augment = burst;
365 	if (stcb->asoc.send_queue_cnt > 255)
366 		sctp_clog.x.cwnd.cnt_in_send = 255;
367 	else
368 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
369 	if (stcb->asoc.stream_queue_cnt > 255)
370 		sctp_clog.x.cwnd.cnt_in_str = 255;
371 	else
372 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
373 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
374 	    SCTP_LOG_EVENT_MAXBURST,
375 	    from,
376 	    sctp_clog.x.misc.log1,
377 	    sctp_clog.x.misc.log2,
378 	    sctp_clog.x.misc.log3,
379 	    sctp_clog.x.misc.log4);
380 }
381 
382 void
383 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
384 {
385 	struct sctp_cwnd_log sctp_clog;
386 
387 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
388 	sctp_clog.x.rwnd.send_size = snd_size;
389 	sctp_clog.x.rwnd.overhead = overhead;
390 	sctp_clog.x.rwnd.new_rwnd = 0;
391 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
392 	    SCTP_LOG_EVENT_RWND,
393 	    from,
394 	    sctp_clog.x.misc.log1,
395 	    sctp_clog.x.misc.log2,
396 	    sctp_clog.x.misc.log3,
397 	    sctp_clog.x.misc.log4);
398 }
399 
400 void
401 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
402 {
403 	struct sctp_cwnd_log sctp_clog;
404 
405 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
406 	sctp_clog.x.rwnd.send_size = flight_size;
407 	sctp_clog.x.rwnd.overhead = overhead;
408 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
409 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
410 	    SCTP_LOG_EVENT_RWND,
411 	    from,
412 	    sctp_clog.x.misc.log1,
413 	    sctp_clog.x.misc.log2,
414 	    sctp_clog.x.misc.log3,
415 	    sctp_clog.x.misc.log4);
416 }
417 
418 void
419 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
420 {
421 	struct sctp_cwnd_log sctp_clog;
422 
423 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
424 	sctp_clog.x.mbcnt.size_change = book;
425 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
426 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
427 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
428 	    SCTP_LOG_EVENT_MBCNT,
429 	    from,
430 	    sctp_clog.x.misc.log1,
431 	    sctp_clog.x.misc.log2,
432 	    sctp_clog.x.misc.log3,
433 	    sctp_clog.x.misc.log4);
434 }
435 
436 void
437 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
438 {
439 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
440 	    SCTP_LOG_MISC_EVENT,
441 	    from,
442 	    a, b, c, d);
443 }
444 
445 void
446 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
447 {
448 	struct sctp_cwnd_log sctp_clog;
449 
450 	sctp_clog.x.wake.stcb = (void *)stcb;
451 	sctp_clog.x.wake.wake_cnt = wake_cnt;
452 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
453 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
454 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
455 
456 	if (stcb->asoc.stream_queue_cnt < 0xff)
457 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
458 	else
459 		sctp_clog.x.wake.stream_qcnt = 0xff;
460 
461 	if (stcb->asoc.chunks_on_out_queue < 0xff)
462 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
463 	else
464 		sctp_clog.x.wake.chunks_on_oque = 0xff;
465 
466 	sctp_clog.x.wake.sctpflags = 0;
467 	/* set in the defered mode stuff */
468 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
469 		sctp_clog.x.wake.sctpflags |= 1;
470 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
471 		sctp_clog.x.wake.sctpflags |= 2;
472 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
473 		sctp_clog.x.wake.sctpflags |= 4;
474 	/* what about the sb */
475 	if (stcb->sctp_socket) {
476 		struct socket *so = stcb->sctp_socket;
477 
478 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
479 	} else {
480 		sctp_clog.x.wake.sbflags = 0xff;
481 	}
482 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
483 	    SCTP_LOG_EVENT_WAKE,
484 	    from,
485 	    sctp_clog.x.misc.log1,
486 	    sctp_clog.x.misc.log2,
487 	    sctp_clog.x.misc.log3,
488 	    sctp_clog.x.misc.log4);
489 }
490 
491 void
492 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
493 {
494 	struct sctp_cwnd_log sctp_clog;
495 
496 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
497 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
498 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
499 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
500 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
501 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
502 	sctp_clog.x.blk.sndlen = sendlen;
503 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
504 	    SCTP_LOG_EVENT_BLOCK,
505 	    from,
506 	    sctp_clog.x.misc.log1,
507 	    sctp_clog.x.misc.log2,
508 	    sctp_clog.x.misc.log3,
509 	    sctp_clog.x.misc.log4);
510 }
511 
/*
 * Stub for the SCTP_GET_STAT_LOG socket option: statistics are exported
 * through the kernel trace buffer instead, so nothing is copied out and
 * success (0) is always returned.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
518 
519 #ifdef SCTP_AUDITING_ENABLED
520 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
521 static int sctp_audit_indx = 0;
522 
523 static
524 void
525 sctp_print_audit_report(void)
526 {
527 	int i;
528 	int cnt;
529 
530 	cnt = 0;
531 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
532 		if ((sctp_audit_data[i][0] == 0xe0) &&
533 		    (sctp_audit_data[i][1] == 0x01)) {
534 			cnt = 0;
535 			SCTP_PRINTF("\n");
536 		} else if (sctp_audit_data[i][0] == 0xf0) {
537 			cnt = 0;
538 			SCTP_PRINTF("\n");
539 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
540 		    (sctp_audit_data[i][1] == 0x01)) {
541 			SCTP_PRINTF("\n");
542 			cnt = 0;
543 		}
544 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
545 		    (uint32_t) sctp_audit_data[i][1]);
546 		cnt++;
547 		if ((cnt % 14) == 0)
548 			SCTP_PRINTF("\n");
549 	}
550 	for (i = 0; i < sctp_audit_indx; i++) {
551 		if ((sctp_audit_data[i][0] == 0xe0) &&
552 		    (sctp_audit_data[i][1] == 0x01)) {
553 			cnt = 0;
554 			SCTP_PRINTF("\n");
555 		} else if (sctp_audit_data[i][0] == 0xf0) {
556 			cnt = 0;
557 			SCTP_PRINTF("\n");
558 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
559 		    (sctp_audit_data[i][1] == 0x01)) {
560 			SCTP_PRINTF("\n");
561 			cnt = 0;
562 		}
563 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
564 		    (uint32_t) sctp_audit_data[i][1]);
565 		cnt++;
566 		if ((cnt % 14) == 0)
567 			SCTP_PRINTF("\n");
568 	}
569 	SCTP_PRINTF("\n");
570 }
571 
572 void
573 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
574     struct sctp_nets *net)
575 {
576 	int resend_cnt, tot_out, rep, tot_book_cnt;
577 	struct sctp_nets *lnet;
578 	struct sctp_tmit_chunk *chk;
579 
580 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
581 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
582 	sctp_audit_indx++;
583 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
584 		sctp_audit_indx = 0;
585 	}
586 	if (inp == NULL) {
587 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
588 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
589 		sctp_audit_indx++;
590 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
591 			sctp_audit_indx = 0;
592 		}
593 		return;
594 	}
595 	if (stcb == NULL) {
596 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
597 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
598 		sctp_audit_indx++;
599 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
600 			sctp_audit_indx = 0;
601 		}
602 		return;
603 	}
604 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
605 	sctp_audit_data[sctp_audit_indx][1] =
606 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
607 	sctp_audit_indx++;
608 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
609 		sctp_audit_indx = 0;
610 	}
611 	rep = 0;
612 	tot_book_cnt = 0;
613 	resend_cnt = tot_out = 0;
614 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
615 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
616 			resend_cnt++;
617 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
618 			tot_out += chk->book_size;
619 			tot_book_cnt++;
620 		}
621 	}
622 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
623 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
624 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
625 		sctp_audit_indx++;
626 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
627 			sctp_audit_indx = 0;
628 		}
629 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
630 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
631 		rep = 1;
632 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
633 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
634 		sctp_audit_data[sctp_audit_indx][1] =
635 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
636 		sctp_audit_indx++;
637 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
638 			sctp_audit_indx = 0;
639 		}
640 	}
641 	if (tot_out != stcb->asoc.total_flight) {
642 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
643 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
644 		sctp_audit_indx++;
645 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
646 			sctp_audit_indx = 0;
647 		}
648 		rep = 1;
649 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
650 		    (int)stcb->asoc.total_flight);
651 		stcb->asoc.total_flight = tot_out;
652 	}
653 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
654 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
655 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
656 		sctp_audit_indx++;
657 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
658 			sctp_audit_indx = 0;
659 		}
660 		rep = 1;
661 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
662 
663 		stcb->asoc.total_flight_count = tot_book_cnt;
664 	}
665 	tot_out = 0;
666 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
667 		tot_out += lnet->flight_size;
668 	}
669 	if (tot_out != stcb->asoc.total_flight) {
670 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
671 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
672 		sctp_audit_indx++;
673 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
674 			sctp_audit_indx = 0;
675 		}
676 		rep = 1;
677 		SCTP_PRINTF("real flight:%d net total was %d\n",
678 		    stcb->asoc.total_flight, tot_out);
679 		/* now corrective action */
680 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
681 
682 			tot_out = 0;
683 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
684 				if ((chk->whoTo == lnet) &&
685 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
686 					tot_out += chk->book_size;
687 				}
688 			}
689 			if (lnet->flight_size != tot_out) {
690 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
691 				    lnet, lnet->flight_size,
692 				    tot_out);
693 				lnet->flight_size = tot_out;
694 			}
695 		}
696 	}
697 	if (rep) {
698 		sctp_print_audit_report();
699 	}
700 }
701 
702 void
703 sctp_audit_log(uint8_t ev, uint8_t fd)
704 {
705 
706 	sctp_audit_data[sctp_audit_indx][0] = ev;
707 	sctp_audit_data[sctp_audit_indx][1] = fd;
708 	sctp_audit_indx++;
709 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
710 		sctp_audit_indx = 0;
711 	}
712 }
713 
714 #endif
715 
716 /*
717  * sctp_stop_timers_for_shutdown() should be called
718  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
719  * state to make sure that all timers are stopped.
720  */
721 void
722 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
723 {
724 	struct sctp_association *asoc;
725 	struct sctp_nets *net;
726 
727 	asoc = &stcb->asoc;
728 
729 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
730 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
731 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
732 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
733 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
734 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
735 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
736 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
737 	}
738 }
739 
/*
 * a list of sizes based on typical mtu's, used only if next hop size not
 * returned. NOTE: the search routines below rely on this table staying
 * sorted in ascending order.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest MTU smaller than val. If there is no
 * entry, just return val.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	size_t idx;

	/* nothing in the table is smaller than val */
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	/* advance to the first entry that is >= val ... */
	idx = 1;
	while ((idx < sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0])) &&
	    (sctp_mtu_sizes[idx] < val)) {
		idx++;
	}
	/* ... and hand back its predecessor */
	return (sctp_mtu_sizes[idx - 1]);
}
784 
785 /*
786  * Return the smallest MTU larger than val. If there is no
787  * entry, just return val.
788  */
789 uint32_t
790 sctp_get_next_mtu(uint32_t val)
791 {
792 	/* select another MTU that is just bigger than this one */
793 	uint32_t i;
794 
795 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
796 		if (val < sctp_mtu_sizes[i]) {
797 			return (sctp_mtu_sizes[i]);
798 		}
799 	}
800 	return (val);
801 }
802 
/*
 * Refill the endpoint's random-number store by HMAC-ing the seed
 * material with a monotonically increasing counter, then reset the
 * read position so sctp_select_initial_TSN() starts over.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* bump the counter so the next refill hashes different input */
	m->random_counter++;
}
821 
822 uint32_t
823 sctp_select_initial_TSN(struct sctp_pcb *inp)
824 {
825 	/*
826 	 * A true implementation should use random selection process to get
827 	 * the initial stream sequence number, using RFC1750 as a good
828 	 * guideline
829 	 */
830 	uint32_t x, *xp;
831 	uint8_t *p;
832 	int store_at, new_store;
833 
834 	if (inp->initial_sequence_debug != 0) {
835 		uint32_t ret;
836 
837 		ret = inp->initial_sequence_debug;
838 		inp->initial_sequence_debug++;
839 		return (ret);
840 	}
841 retry:
842 	store_at = inp->store_at;
843 	new_store = store_at + sizeof(uint32_t);
844 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
845 		new_store = 0;
846 	}
847 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
848 		goto retry;
849 	}
850 	if (new_store == 0) {
851 		/* Refill the random store */
852 		sctp_fill_random_store(inp);
853 	}
854 	p = &inp->random_store[store_at];
855 	xp = (uint32_t *) p;
856 	x = *xp;
857 	return (x);
858 }
859 
860 uint32_t
861 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
862 {
863 	uint32_t x;
864 	struct timeval now;
865 
866 	if (check) {
867 		(void)SCTP_GETTIME_TIMEVAL(&now);
868 	}
869 	for (;;) {
870 		x = sctp_select_initial_TSN(&inp->sctp_ep);
871 		if (x == 0) {
872 			/* we never use 0 */
873 			continue;
874 		}
875 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
876 			break;
877 		}
878 	}
879 	return (x);
880 }
881 
882 int
883 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
884     uint32_t override_tag, uint32_t vrf_id)
885 {
886 	struct sctp_association *asoc;
887 
888 	/*
889 	 * Anything set to zero is taken care of by the allocation routine's
890 	 * bzero
891 	 */
892 
893 	/*
894 	 * Up front select what scoping to apply on addresses I tell my peer
895 	 * Not sure what to do with these right now, we will need to come up
896 	 * with a way to set them. We may need to pass them through from the
897 	 * caller in the sctp_aloc_assoc() function.
898 	 */
899 	int i;
900 
901 	asoc = &stcb->asoc;
902 	/* init all variables to a known value. */
903 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
904 	asoc->max_burst = m->sctp_ep.max_burst;
905 	asoc->fr_max_burst = m->sctp_ep.fr_max_burst;
906 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
907 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
908 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
909 	asoc->ecn_allowed = m->sctp_ecn_enable;
910 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
911 	asoc->sctp_cmt_pf = (uint8_t) 0;
912 	asoc->sctp_frag_point = m->sctp_frag_point;
913 	asoc->sctp_features = m->sctp_features;
914 	asoc->default_dscp = m->sctp_ep.default_dscp;
915 #ifdef INET6
916 	if (m->sctp_ep.default_flowlabel) {
917 		asoc->default_flowlabel = m->sctp_ep.default_flowlabel;
918 	} else {
919 		if (m->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
920 			asoc->default_flowlabel = sctp_select_initial_TSN(&m->sctp_ep);
921 			asoc->default_flowlabel &= 0x000fffff;
922 			asoc->default_flowlabel |= 0x80000000;
923 		} else {
924 			asoc->default_flowlabel = 0;
925 		}
926 	}
927 #endif
928 	asoc->sb_send_resv = 0;
929 	if (override_tag) {
930 		asoc->my_vtag = override_tag;
931 	} else {
932 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
933 	}
934 	/* Get the nonce tags */
935 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
936 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
937 	asoc->vrf_id = vrf_id;
938 
939 #ifdef SCTP_ASOCLOG_OF_TSNS
940 	asoc->tsn_in_at = 0;
941 	asoc->tsn_out_at = 0;
942 	asoc->tsn_in_wrapped = 0;
943 	asoc->tsn_out_wrapped = 0;
944 	asoc->cumack_log_at = 0;
945 	asoc->cumack_log_atsnt = 0;
946 #endif
947 #ifdef SCTP_FS_SPEC_LOG
948 	asoc->fs_index = 0;
949 #endif
950 	asoc->refcnt = 0;
951 	asoc->assoc_up_sent = 0;
952 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
953 	    sctp_select_initial_TSN(&m->sctp_ep);
954 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
955 	/* we are optimisitic here */
956 	asoc->peer_supports_pktdrop = 1;
957 	asoc->peer_supports_nat = 0;
958 	asoc->sent_queue_retran_cnt = 0;
959 
960 	/* for CMT */
961 	asoc->last_net_cmt_send_started = NULL;
962 
963 	/* This will need to be adjusted */
964 	asoc->last_acked_seq = asoc->init_seq_number - 1;
965 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
966 	asoc->asconf_seq_in = asoc->last_acked_seq;
967 
968 	/* here we are different, we hold the next one we expect */
969 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
970 
971 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
972 	asoc->initial_rto = m->sctp_ep.initial_rto;
973 
974 	asoc->max_init_times = m->sctp_ep.max_init_times;
975 	asoc->max_send_times = m->sctp_ep.max_send_times;
976 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
977 	asoc->def_net_pf_threshold = m->sctp_ep.def_net_pf_threshold;
978 	asoc->free_chunk_cnt = 0;
979 
980 	asoc->iam_blocking = 0;
981 
982 	asoc->context = m->sctp_context;
983 	asoc->def_send = m->def_send;
984 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
985 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
986 	asoc->pr_sctp_cnt = 0;
987 	asoc->total_output_queue_size = 0;
988 
989 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
990 		struct in6pcb *inp6;
991 
992 		/* Its a V6 socket */
993 		inp6 = (struct in6pcb *)m;
994 		asoc->ipv6_addr_legal = 1;
995 		/* Now look at the binding flag to see if V4 will be legal */
996 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
997 			asoc->ipv4_addr_legal = 1;
998 		} else {
999 			/* V4 addresses are NOT legal on the association */
1000 			asoc->ipv4_addr_legal = 0;
1001 		}
1002 	} else {
1003 		/* Its a V4 socket, no - V6 */
1004 		asoc->ipv4_addr_legal = 1;
1005 		asoc->ipv6_addr_legal = 0;
1006 	}
1007 
1008 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1009 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1010 
1011 	asoc->smallest_mtu = m->sctp_frag_point;
1012 	asoc->minrto = m->sctp_ep.sctp_minrto;
1013 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1014 
1015 	asoc->locked_on_sending = NULL;
1016 	asoc->stream_locked_on = 0;
1017 	asoc->ecn_echo_cnt_onq = 0;
1018 	asoc->stream_locked = 0;
1019 
1020 	asoc->send_sack = 1;
1021 
1022 	LIST_INIT(&asoc->sctp_restricted_addrs);
1023 
1024 	TAILQ_INIT(&asoc->nets);
1025 	TAILQ_INIT(&asoc->pending_reply_queue);
1026 	TAILQ_INIT(&asoc->asconf_ack_sent);
1027 	/* Setup to fill the hb random cache at first HB */
1028 	asoc->hb_random_idx = 4;
1029 
1030 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1031 
1032 	stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
1033 	stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
1034 
1035 	stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
1036 	stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
1037 
1038 	/*
1039 	 * Now the stream parameters, here we allocate space for all streams
1040 	 * that we request by default.
1041 	 */
1042 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1043 	    m->sctp_ep.pre_open_stream_count;
1044 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1045 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1046 	    SCTP_M_STRMO);
1047 	if (asoc->strmout == NULL) {
1048 		/* big trouble no memory */
1049 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1050 		return (ENOMEM);
1051 	}
1052 	for (i = 0; i < asoc->streamoutcnt; i++) {
1053 		/*
1054 		 * inbound side must be set to 0xffff, also NOTE when we get
1055 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1056 		 * count (streamoutcnt) but first check if we sent to any of
1057 		 * the upper streams that were dropped (if some were). Those
1058 		 * that were dropped must be notified to the upper layer as
1059 		 * failed to send.
1060 		 */
1061 		asoc->strmout[i].next_sequence_sent = 0x0;
1062 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1063 		asoc->strmout[i].stream_no = i;
1064 		asoc->strmout[i].last_msg_incomplete = 0;
1065 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1066 	}
1067 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1068 
1069 	/* Now the mapping array */
1070 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1071 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1072 	    SCTP_M_MAP);
1073 	if (asoc->mapping_array == NULL) {
1074 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1075 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1076 		return (ENOMEM);
1077 	}
1078 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1079 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1080 	    SCTP_M_MAP);
1081 	if (asoc->nr_mapping_array == NULL) {
1082 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1083 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1084 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1085 		return (ENOMEM);
1086 	}
1087 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1088 
1089 	/* Now the init of the other outqueues */
1090 	TAILQ_INIT(&asoc->free_chunks);
1091 	TAILQ_INIT(&asoc->control_send_queue);
1092 	TAILQ_INIT(&asoc->asconf_send_queue);
1093 	TAILQ_INIT(&asoc->send_queue);
1094 	TAILQ_INIT(&asoc->sent_queue);
1095 	TAILQ_INIT(&asoc->reasmqueue);
1096 	TAILQ_INIT(&asoc->resetHead);
1097 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1098 	TAILQ_INIT(&asoc->asconf_queue);
1099 	/* authentication fields */
1100 	asoc->authinfo.random = NULL;
1101 	asoc->authinfo.active_keyid = 0;
1102 	asoc->authinfo.assoc_key = NULL;
1103 	asoc->authinfo.assoc_keyid = 0;
1104 	asoc->authinfo.recv_key = NULL;
1105 	asoc->authinfo.recv_keyid = 0;
1106 	LIST_INIT(&asoc->shared_keys);
1107 	asoc->marked_retrans = 0;
1108 	asoc->port = m->sctp_ep.port;
1109 	asoc->timoinit = 0;
1110 	asoc->timodata = 0;
1111 	asoc->timosack = 0;
1112 	asoc->timoshutdown = 0;
1113 	asoc->timoheartbeat = 0;
1114 	asoc->timocookie = 0;
1115 	asoc->timoshutdownack = 0;
1116 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1117 	asoc->discontinuity_time = asoc->start_time;
1118 	/*
1119 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1120 	 * freed later when the association is freed.
1121 	 */
1122 	return (0);
1123 }
1124 
1125 void
1126 sctp_print_mapping_array(struct sctp_association *asoc)
1127 {
1128 	unsigned int i, limit;
1129 
1130 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1131 	    asoc->mapping_array_size,
1132 	    asoc->mapping_array_base_tsn,
1133 	    asoc->cumulative_tsn,
1134 	    asoc->highest_tsn_inside_map,
1135 	    asoc->highest_tsn_inside_nr_map);
1136 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1137 		if (asoc->mapping_array[limit - 1] != 0) {
1138 			break;
1139 		}
1140 	}
1141 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1142 	for (i = 0; i < limit; i++) {
1143 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1144 	}
1145 	if (limit % 16)
1146 		printf("\n");
1147 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1148 		if (asoc->nr_mapping_array[limit - 1]) {
1149 			break;
1150 		}
1151 	}
1152 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1153 	for (i = 0; i < limit; i++) {
1154 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1155 	}
1156 	if (limit % 16)
1157 		printf("\n");
1158 }
1159 
1160 int
1161 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1162 {
1163 	/* mapping array needs to grow */
1164 	uint8_t *new_array1, *new_array2;
1165 	uint32_t new_size;
1166 
1167 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1168 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1169 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1170 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1171 		/* can't get more, forget it */
1172 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1173 		if (new_array1) {
1174 			SCTP_FREE(new_array1, SCTP_M_MAP);
1175 		}
1176 		if (new_array2) {
1177 			SCTP_FREE(new_array2, SCTP_M_MAP);
1178 		}
1179 		return (-1);
1180 	}
1181 	memset(new_array1, 0, new_size);
1182 	memset(new_array2, 0, new_size);
1183 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1184 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1185 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1186 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1187 	asoc->mapping_array = new_array1;
1188 	asoc->nr_mapping_array = new_array2;
1189 	asoc->mapping_array_size = new_size;
1190 	return (0);
1191 }
1192 
1193 
/*
 * Core of the PCB/association iterator.  Walks endpoints (all of them,
 * or just one when SCTP_ITERATOR_DO_SINGLE_INP is set) whose pcb flags
 * and features match the iterator's criteria, and invokes the
 * caller-supplied callbacks: function_inp once per endpoint,
 * function_assoc once per matching association, function_inp_end when an
 * endpoint's associations are exhausted, and function_atend when the
 * whole iteration completes.
 *
 * Locking: takes the INP-INFO read lock and the iterator lock for the
 * duration, but periodically (every SCTP_ITERATOR_MAX_AT_ONCE
 * associations) drops and re-acquires them so other threads can make
 * progress.  Always frees "it" before returning.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/*
		 * NOTE(review): presumably drops a reference taken when the
		 * iterator was queued with this starting inp — confirm
		 * against sctp_initiate_iterator().
		 */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* On the first endpoint the INP read lock is already held (above). */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* unlock the old inp only after the next one is fetched */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	/* a non-zero return from function_inp skips this endpoint entirely */
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* pin the stcb/inp so they survive the unlocked window */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/* someone may have asked us to stop while unlocked */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				printf("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* re-take the locks and drop the pinning references */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1341 
1342 void
1343 sctp_iterator_worker(void)
1344 {
1345 	struct sctp_iterator *it, *nit;
1346 
1347 	/* This function is called with the WQ lock in place */
1348 
1349 	sctp_it_ctl.iterator_running = 1;
1350 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1351 		sctp_it_ctl.cur_it = it;
1352 		/* now lets work on this one */
1353 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1354 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1355 		CURVNET_SET(it->vn);
1356 		sctp_iterator_work(it);
1357 		sctp_it_ctl.cur_it = NULL;
1358 		CURVNET_RESTORE();
1359 		SCTP_IPI_ITERATOR_WQ_LOCK();
1360 		/* sa_ignore FREED_MEMORY */
1361 	}
1362 	sctp_it_ctl.iterator_running = 0;
1363 	return;
1364 }
1365 
1366 
1367 static void
1368 sctp_handle_addr_wq(void)
1369 {
1370 	/* deal with the ADDR wq from the rtsock calls */
1371 	struct sctp_laddr *wi, *nwi;
1372 	struct sctp_asconf_iterator *asc;
1373 
1374 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1375 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1376 	if (asc == NULL) {
1377 		/* Try later, no memory */
1378 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1379 		    (struct sctp_inpcb *)NULL,
1380 		    (struct sctp_tcb *)NULL,
1381 		    (struct sctp_nets *)NULL);
1382 		return;
1383 	}
1384 	LIST_INIT(&asc->list_of_work);
1385 	asc->cnt = 0;
1386 
1387 	SCTP_WQ_ADDR_LOCK();
1388 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1389 		LIST_REMOVE(wi, sctp_nxt_addr);
1390 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1391 		asc->cnt++;
1392 	}
1393 	SCTP_WQ_ADDR_UNLOCK();
1394 
1395 	if (asc->cnt == 0) {
1396 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1397 	} else {
1398 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1399 		    sctp_asconf_iterator_stcb,
1400 		    NULL,	/* No ep end for boundall */
1401 		    SCTP_PCB_FLAGS_BOUNDALL,
1402 		    SCTP_PCB_ANY_FEATURES,
1403 		    SCTP_ASOC_ANY_STATE,
1404 		    (void *)asc, 0,
1405 		    sctp_asconf_iterator_end, NULL, 0);
1406 	}
1407 }
1408 
/*
 * Common callout entry point for every SCTP timer type.  "t" points at
 * the struct sctp_timer embedded in the endpoint/association/net that
 * armed it.  The function validates the timer, acquires the references
 * and locks needed for its type, dispatches to the per-type handler in
 * the big switch below, and releases everything on the way out.
 *
 * The tmr->stopped_from assignments (0xa001..0xa006) are breadcrumbs
 * recording how far the handler got, for post-mortem debugging.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* every type except ADDR_WQ requires an endpoint */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/*
	 * Save the type now: the final debug message at out_no_decr must
	 * report it even after inp/stcb may have been freed and cleared.
	 */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * A socket-less endpoint only services the timer types
		 * listed below; bail out for anything else.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* pin the association while we check its state */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* lock the tcb, then drop the pinning reference taken above */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed-SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* only re-arm when HBs are still enabled on this path */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* rotate the endpoint's secret keys used for cookie signing */
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* shutdown took too long: abort the association */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
	/* common exit paths: unlock tcb, drop the inp ref, log completion */
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1849 
1850 void
1851 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1852     struct sctp_nets *net)
1853 {
1854 	uint32_t to_ticks;
1855 	struct sctp_timer *tmr;
1856 
1857 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1858 		return;
1859 
1860 	tmr = NULL;
1861 	if (stcb) {
1862 		SCTP_TCB_LOCK_ASSERT(stcb);
1863 	}
1864 	switch (t_type) {
1865 	case SCTP_TIMER_TYPE_ZERO_COPY:
1866 		tmr = &inp->sctp_ep.zero_copy_timer;
1867 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1868 		break;
1869 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1870 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1871 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1872 		break;
1873 	case SCTP_TIMER_TYPE_ADDR_WQ:
1874 		/* Only 1 tick away :-) */
1875 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1876 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1877 		break;
1878 	case SCTP_TIMER_TYPE_SEND:
1879 		/* Here we use the RTO timer */
1880 		{
1881 			int rto_val;
1882 
1883 			if ((stcb == NULL) || (net == NULL)) {
1884 				return;
1885 			}
1886 			tmr = &net->rxt_timer;
1887 			if (net->RTO == 0) {
1888 				rto_val = stcb->asoc.initial_rto;
1889 			} else {
1890 				rto_val = net->RTO;
1891 			}
1892 			to_ticks = MSEC_TO_TICKS(rto_val);
1893 		}
1894 		break;
1895 	case SCTP_TIMER_TYPE_INIT:
1896 		/*
1897 		 * Here we use the INIT timer default usually about 1
1898 		 * minute.
1899 		 */
1900 		if ((stcb == NULL) || (net == NULL)) {
1901 			return;
1902 		}
1903 		tmr = &net->rxt_timer;
1904 		if (net->RTO == 0) {
1905 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1906 		} else {
1907 			to_ticks = MSEC_TO_TICKS(net->RTO);
1908 		}
1909 		break;
1910 	case SCTP_TIMER_TYPE_RECV:
1911 		/*
1912 		 * Here we use the Delayed-Ack timer value from the inp
1913 		 * ususually about 200ms.
1914 		 */
1915 		if (stcb == NULL) {
1916 			return;
1917 		}
1918 		tmr = &stcb->asoc.dack_timer;
1919 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1920 		break;
1921 	case SCTP_TIMER_TYPE_SHUTDOWN:
1922 		/* Here we use the RTO of the destination. */
1923 		if ((stcb == NULL) || (net == NULL)) {
1924 			return;
1925 		}
1926 		if (net->RTO == 0) {
1927 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1928 		} else {
1929 			to_ticks = MSEC_TO_TICKS(net->RTO);
1930 		}
1931 		tmr = &net->rxt_timer;
1932 		break;
1933 	case SCTP_TIMER_TYPE_HEARTBEAT:
1934 		/*
1935 		 * the net is used here so that we can add in the RTO. Even
1936 		 * though we use a different timer. We also add the HB timer
1937 		 * PLUS a random jitter.
1938 		 */
1939 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1940 			return;
1941 		} else {
1942 			uint32_t rndval;
1943 			uint32_t jitter;
1944 
1945 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1946 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1947 				return;
1948 			}
1949 			if (net->RTO == 0) {
1950 				to_ticks = stcb->asoc.initial_rto;
1951 			} else {
1952 				to_ticks = net->RTO;
1953 			}
1954 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1955 			jitter = rndval % to_ticks;
1956 			if (jitter >= (to_ticks >> 1)) {
1957 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1958 			} else {
1959 				to_ticks = to_ticks - jitter;
1960 			}
1961 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1962 			    !(net->dest_state & SCTP_ADDR_PF)) {
1963 				to_ticks += net->heart_beat_delay;
1964 			}
1965 			/*
1966 			 * Now we must convert the to_ticks that are now in
1967 			 * ms to ticks.
1968 			 */
1969 			to_ticks = MSEC_TO_TICKS(to_ticks);
1970 			tmr = &net->hb_timer;
1971 		}
1972 		break;
1973 	case SCTP_TIMER_TYPE_COOKIE:
1974 		/*
1975 		 * Here we can use the RTO timer from the network since one
1976 		 * RTT was compelete. If a retran happened then we will be
1977 		 * using the RTO initial value.
1978 		 */
1979 		if ((stcb == NULL) || (net == NULL)) {
1980 			return;
1981 		}
1982 		if (net->RTO == 0) {
1983 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1984 		} else {
1985 			to_ticks = MSEC_TO_TICKS(net->RTO);
1986 		}
1987 		tmr = &net->rxt_timer;
1988 		break;
1989 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1990 		/*
1991 		 * nothing needed but the endpoint here ususually about 60
1992 		 * minutes.
1993 		 */
1994 		if (inp == NULL) {
1995 			return;
1996 		}
1997 		tmr = &inp->sctp_ep.signature_change;
1998 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1999 		break;
2000 	case SCTP_TIMER_TYPE_ASOCKILL:
2001 		if (stcb == NULL) {
2002 			return;
2003 		}
2004 		tmr = &stcb->asoc.strreset_timer;
2005 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2006 		break;
2007 	case SCTP_TIMER_TYPE_INPKILL:
2008 		/*
2009 		 * The inp is setup to die. We re-use the signature_chage
2010 		 * timer since that has stopped and we are in the GONE
2011 		 * state.
2012 		 */
2013 		if (inp == NULL) {
2014 			return;
2015 		}
2016 		tmr = &inp->sctp_ep.signature_change;
2017 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2018 		break;
2019 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2020 		/*
2021 		 * Here we use the value found in the EP for PMTU ususually
2022 		 * about 10 minutes.
2023 		 */
2024 		if ((stcb == NULL) || (inp == NULL)) {
2025 			return;
2026 		}
2027 		if (net == NULL) {
2028 			return;
2029 		}
2030 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2031 			return;
2032 		}
2033 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2034 		tmr = &net->pmtu_timer;
2035 		break;
2036 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2037 		/* Here we use the RTO of the destination */
2038 		if ((stcb == NULL) || (net == NULL)) {
2039 			return;
2040 		}
2041 		if (net->RTO == 0) {
2042 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2043 		} else {
2044 			to_ticks = MSEC_TO_TICKS(net->RTO);
2045 		}
2046 		tmr = &net->rxt_timer;
2047 		break;
2048 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2049 		/*
2050 		 * Here we use the endpoints shutdown guard timer usually
2051 		 * about 3 minutes.
2052 		 */
2053 		if ((inp == NULL) || (stcb == NULL)) {
2054 			return;
2055 		}
2056 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2057 		tmr = &stcb->asoc.shut_guard_timer;
2058 		break;
2059 	case SCTP_TIMER_TYPE_STRRESET:
2060 		/*
2061 		 * Here the timer comes from the stcb but its value is from
2062 		 * the net's RTO.
2063 		 */
2064 		if ((stcb == NULL) || (net == NULL)) {
2065 			return;
2066 		}
2067 		if (net->RTO == 0) {
2068 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2069 		} else {
2070 			to_ticks = MSEC_TO_TICKS(net->RTO);
2071 		}
2072 		tmr = &stcb->asoc.strreset_timer;
2073 		break;
2074 	case SCTP_TIMER_TYPE_ASCONF:
2075 		/*
2076 		 * Here the timer comes from the stcb but its value is from
2077 		 * the net's RTO.
2078 		 */
2079 		if ((stcb == NULL) || (net == NULL)) {
2080 			return;
2081 		}
2082 		if (net->RTO == 0) {
2083 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2084 		} else {
2085 			to_ticks = MSEC_TO_TICKS(net->RTO);
2086 		}
2087 		tmr = &stcb->asoc.asconf_timer;
2088 		break;
2089 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2090 		if ((stcb == NULL) || (net != NULL)) {
2091 			return;
2092 		}
2093 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2094 		tmr = &stcb->asoc.delete_prim_timer;
2095 		break;
2096 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2097 		if (stcb == NULL) {
2098 			return;
2099 		}
2100 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2101 			/*
2102 			 * Really an error since stcb is NOT set to
2103 			 * autoclose
2104 			 */
2105 			return;
2106 		}
2107 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2108 		tmr = &stcb->asoc.autoclose_timer;
2109 		break;
2110 	default:
2111 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2112 		    __FUNCTION__, t_type);
2113 		return;
2114 		break;
2115 	}
2116 	if ((to_ticks <= 0) || (tmr == NULL)) {
2117 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2118 		    __FUNCTION__, t_type, to_ticks, tmr);
2119 		return;
2120 	}
2121 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2122 		/*
2123 		 * we do NOT allow you to have it already running. if it is
2124 		 * we leave the current one up unchanged
2125 		 */
2126 		return;
2127 	}
2128 	/* At this point we can proceed */
2129 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2130 		stcb->asoc.num_send_timers_up++;
2131 	}
2132 	tmr->stopped_from = 0;
2133 	tmr->type = t_type;
2134 	tmr->ep = (void *)inp;
2135 	tmr->tcb = (void *)stcb;
2136 	tmr->net = (void *)net;
2137 	tmr->self = (void *)tmr;
2138 	tmr->vnet = (void *)curvnet;
2139 	tmr->ticks = sctp_get_tick_count();
2140 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2141 	return;
2142 }
2143 
/*
 * Stop a running SCTP timer of type 't_type'.  The timer structure is
 * located from inp/stcb/net depending on the timer type; 'from' records
 * the caller's location in tmr->stopped_from for debugging.  If the
 * located timer slot is currently owned by a different timer type
 * (several types share one slot), the stop request is silently ignored.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may be stopped without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Locate the timer slot for this timer type. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Keep the count of outstanding send timers in sync. */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2300 
2301 uint32_t
2302 sctp_calculate_len(struct mbuf *m)
2303 {
2304 	uint32_t tlen = 0;
2305 	struct mbuf *at;
2306 
2307 	at = m;
2308 	while (at) {
2309 		tlen += SCTP_BUF_LEN(at);
2310 		at = SCTP_BUF_NEXT(at);
2311 	}
2312 	return (tlen);
2313 }
2314 
2315 void
2316 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2317     struct sctp_association *asoc, uint32_t mtu)
2318 {
2319 	/*
2320 	 * Reset the P-MTU size on this association, this involves changing
2321 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2322 	 * allow the DF flag to be cleared.
2323 	 */
2324 	struct sctp_tmit_chunk *chk;
2325 	unsigned int eff_mtu, ovh;
2326 
2327 	asoc->smallest_mtu = mtu;
2328 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2329 		ovh = SCTP_MIN_OVERHEAD;
2330 	} else {
2331 		ovh = SCTP_MIN_V4_OVERHEAD;
2332 	}
2333 	eff_mtu = mtu - ovh;
2334 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2335 		if (chk->send_size > eff_mtu) {
2336 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2337 		}
2338 	}
2339 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2340 		if (chk->send_size > eff_mtu) {
2341 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2342 		}
2343 	}
2344 }
2345 
2346 
2347 /*
2348  * given an association and starting time of the current RTT period return
2349  * RTO in number of msecs net should point to the current network
2350  */
2351 
2352 uint32_t
2353 sctp_calculate_rto(struct sctp_tcb *stcb,
2354     struct sctp_association *asoc,
2355     struct sctp_nets *net,
2356     struct timeval *told,
2357     int safe, int rtt_from_sack)
2358 {
2359 	/*-
2360 	 * given an association and the starting time of the current RTT
2361 	 * period (in value1/value2) return RTO in number of msecs.
2362 	 */
2363 	int32_t rtt;		/* RTT in ms */
2364 	uint32_t new_rto;
2365 	int first_measure = 0;
2366 	struct timeval now, then, *old;
2367 
2368 	/* Copy it out for sparc64 */
2369 	if (safe == sctp_align_unsafe_makecopy) {
2370 		old = &then;
2371 		memcpy(&then, told, sizeof(struct timeval));
2372 	} else if (safe == sctp_align_safe_nocopy) {
2373 		old = told;
2374 	} else {
2375 		/* error */
2376 		SCTP_PRINTF("Huh, bad rto calc call\n");
2377 		return (0);
2378 	}
2379 	/************************/
2380 	/* 1. calculate new RTT */
2381 	/************************/
2382 	/* get the current time */
2383 	if (stcb->asoc.use_precise_time) {
2384 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2385 	} else {
2386 		(void)SCTP_GETTIME_TIMEVAL(&now);
2387 	}
2388 	timevalsub(&now, old);
2389 	/* store the current RTT in us */
2390 	net->rtt = (uint64_t) 10000000 *(uint64_t) now.tv_sec +
2391 	         (uint64_t) now.tv_usec;
2392 
2393 	/* computer rtt in ms */
2394 	rtt = net->rtt / 1000;
2395 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2396 		/*
2397 		 * Tell the CC module that a new update has just occurred
2398 		 * from a sack
2399 		 */
2400 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2401 	}
2402 	/*
2403 	 * Do we need to determine the lan? We do this only on sacks i.e.
2404 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2405 	 */
2406 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2407 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2408 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2409 			net->lan_type = SCTP_LAN_INTERNET;
2410 		} else {
2411 			net->lan_type = SCTP_LAN_LOCAL;
2412 		}
2413 	}
2414 	/***************************/
2415 	/* 2. update RTTVAR & SRTT */
2416 	/***************************/
2417 	/*-
2418 	 * Compute the scaled average lastsa and the
2419 	 * scaled variance lastsv as described in van Jacobson
2420 	 * Paper "Congestion Avoidance and Control", Annex A.
2421 	 *
2422 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2423 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2424 	 */
2425 	if (net->RTO_measured) {
2426 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2427 		net->lastsa += rtt;
2428 		if (rtt < 0) {
2429 			rtt = -rtt;
2430 		}
2431 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2432 		net->lastsv += rtt;
2433 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2434 			rto_logging(net, SCTP_LOG_RTTVAR);
2435 		}
2436 	} else {
2437 		/* First RTO measurment */
2438 		net->RTO_measured = 1;
2439 		first_measure = 1;
2440 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2441 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2442 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2443 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2444 		}
2445 	}
2446 	if (net->lastsv == 0) {
2447 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2448 	}
2449 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2450 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2451 	    (stcb->asoc.sat_network_lockout == 0)) {
2452 		stcb->asoc.sat_network = 1;
2453 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2454 		stcb->asoc.sat_network = 0;
2455 		stcb->asoc.sat_network_lockout = 1;
2456 	}
2457 	/* bound it, per C6/C7 in Section 5.3.1 */
2458 	if (new_rto < stcb->asoc.minrto) {
2459 		new_rto = stcb->asoc.minrto;
2460 	}
2461 	if (new_rto > stcb->asoc.maxrto) {
2462 		new_rto = stcb->asoc.maxrto;
2463 	}
2464 	/* we are now returning the RTO */
2465 	return (new_rto);
2466 }
2467 
2468 /*
2469  * return a pointer to a contiguous piece of data from the given mbuf chain
2470  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2471  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2472  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2473  */
2474 caddr_t
2475 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2476 {
2477 	uint32_t count;
2478 	uint8_t *ptr;
2479 
2480 	ptr = in_ptr;
2481 	if ((off < 0) || (len <= 0))
2482 		return (NULL);
2483 
2484 	/* find the desired start location */
2485 	while ((m != NULL) && (off > 0)) {
2486 		if (off < SCTP_BUF_LEN(m))
2487 			break;
2488 		off -= SCTP_BUF_LEN(m);
2489 		m = SCTP_BUF_NEXT(m);
2490 	}
2491 	if (m == NULL)
2492 		return (NULL);
2493 
2494 	/* is the current mbuf large enough (eg. contiguous)? */
2495 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2496 		return (mtod(m, caddr_t)+off);
2497 	} else {
2498 		/* else, it spans more than one mbuf, so save a temp copy... */
2499 		while ((m != NULL) && (len > 0)) {
2500 			count = min(SCTP_BUF_LEN(m) - off, len);
2501 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2502 			len -= count;
2503 			ptr += count;
2504 			off = 0;
2505 			m = SCTP_BUF_NEXT(m);
2506 		}
2507 		if ((m == NULL) && (len > 0))
2508 			return (NULL);
2509 		else
2510 			return ((caddr_t)in_ptr);
2511 	}
2512 }
2513 
2514 
2515 
2516 struct sctp_paramhdr *
2517 sctp_get_next_param(struct mbuf *m,
2518     int offset,
2519     struct sctp_paramhdr *pull,
2520     int pull_limit)
2521 {
2522 	/* This just provides a typed signature to Peter's Pull routine */
2523 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2524 	    (uint8_t *) pull));
2525 }
2526 
2527 
2528 int
2529 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2530 {
2531 	/*
2532 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2533 	 * padlen is > 3 this routine will fail.
2534 	 */
2535 	uint8_t *dp;
2536 	int i;
2537 
2538 	if (padlen > 3) {
2539 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2540 		return (ENOBUFS);
2541 	}
2542 	if (padlen <= M_TRAILINGSPACE(m)) {
2543 		/*
2544 		 * The easy way. We hope the majority of the time we hit
2545 		 * here :)
2546 		 */
2547 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2548 		SCTP_BUF_LEN(m) += padlen;
2549 	} else {
2550 		/* Hard way we must grow the mbuf */
2551 		struct mbuf *tmp;
2552 
2553 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2554 		if (tmp == NULL) {
2555 			/* Out of space GAK! we are in big trouble. */
2556 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2557 			return (ENOSPC);
2558 		}
2559 		/* setup and insert in middle */
2560 		SCTP_BUF_LEN(tmp) = padlen;
2561 		SCTP_BUF_NEXT(tmp) = NULL;
2562 		SCTP_BUF_NEXT(m) = tmp;
2563 		dp = mtod(tmp, uint8_t *);
2564 	}
2565 	/* zero out the pad */
2566 	for (i = 0; i < padlen; i++) {
2567 		*dp = 0;
2568 		dp++;
2569 	}
2570 	return (0);
2571 }
2572 
2573 int
2574 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2575 {
2576 	/* find the last mbuf in chain and pad it */
2577 	struct mbuf *m_at;
2578 
2579 	m_at = m;
2580 	if (last_mbuf) {
2581 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2582 	} else {
2583 		while (m_at) {
2584 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2585 				return (sctp_add_pad_tombuf(m_at, padval));
2586 			}
2587 			m_at = SCTP_BUF_NEXT(m_at);
2588 		}
2589 	}
2590 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2591 	return (EFAULT);
2592 }
2593 
/*
 * Queue an SCTP_ASSOC_CHANGE notification ('event' with cause 'error') on
 * the socket's read queue.  For TCP-model/connected sockets a COMM_LOST
 * or CANT_STR_ASSOC also sets so_error and wakes any sleepers.
 * 'so_locked' indicates whether the caller already holds the socket lock
 * (only used on platforms that take it here).
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			/* Association never completed: refused. */
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Drop the TCB lock while acquiring the socket
			 * lock (lock ordering); hold a refcount so the
			 * stcb cannot go away meanwhile.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		socantrcvmore(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Fill in the notification structure. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Same lock-juggling dance as above. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2711 
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for address 'sa' entering
 * 'state' (with cause 'error') on the socket's read queue, if the
 * association has the event enabled.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address, normalizing any IPv6 scope for the user. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
2789 
2790 
/*
 * Queue an SCTP_SEND_FAILED notification for chunk 'chk' on the socket's
 * read queue, attaching the undelivered user data (with the SCTP data
 * chunk header trimmed off).  'error' distinguishes unsent from sent-but
 * -not-acked data.  Takes ownership of chk->data on success or failure.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * NOTE(review): the data-chunk header size is subtracted
	 * unconditionally here, while the trim below only happens when
	 * send_size >= sizeof(struct sctp_data_chunk) — confirm send_size
	 * can never be smaller at this point.
	 */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2873 
2874 
/*
 * Queue an SCTP_SEND_FAILED notification for a stream-queue pending
 * message 'sp' (data still sitting on the stream output queue) on the
 * socket's read queue.  'error' distinguishes unsent from sent data.
 * Takes ownership of sp->data.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/*
		 * NOTE(review): some_taken presumably means part of the
		 * message was already moved for transmission, leaving this
		 * as the last fragment — confirm against the senders.
		 */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
2948 
2949 
2950 
2951 static void
2952 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
2953 {
2954 	struct mbuf *m_notify;
2955 	struct sctp_adaptation_event *sai;
2956 	struct sctp_queued_to_read *control;
2957 
2958 	if ((stcb == NULL) ||
2959 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
2960 		/* event not enabled */
2961 		return;
2962 	}
2963 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
2964 	if (m_notify == NULL)
2965 		/* no space left */
2966 		return;
2967 	SCTP_BUF_LEN(m_notify) = 0;
2968 	sai = mtod(m_notify, struct sctp_adaptation_event *);
2969 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
2970 	sai->sai_flags = 0;
2971 	sai->sai_length = sizeof(struct sctp_adaptation_event);
2972 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
2973 	sai->sai_assoc_id = sctp_get_associd(stcb);
2974 
2975 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
2976 	SCTP_BUF_NEXT(m_notify) = NULL;
2977 
2978 	/* append to socket */
2979 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2980 	    0, 0, stcb->asoc.context, 0, 0, 0,
2981 	    m_notify);
2982 	if (control == NULL) {
2983 		/* no memory */
2984 		sctp_m_freem(m_notify);
2985 		return;
2986 	}
2987 	control->length = SCTP_BUF_LEN(m_notify);
2988 	control->spec_flags = M_NOTIFICATION;
2989 	/* not that we need this */
2990 	control->tail_mbuf = m_notify;
2991 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2992 	    control,
2993 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2994 }
2995 
2996 /* This always must be called with the read-queue LOCKED in the INP */
2997 static void
2998 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
2999     uint32_t val, int so_locked
3000 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3001     SCTP_UNUSED
3002 #endif
3003 )
3004 {
3005 	struct mbuf *m_notify;
3006 	struct sctp_pdapi_event *pdapi;
3007 	struct sctp_queued_to_read *control;
3008 	struct sockbuf *sb;
3009 
3010 	if ((stcb == NULL) ||
3011 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3012 		/* event not enabled */
3013 		return;
3014 	}
3015 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3016 		return;
3017 	}
3018 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3019 	if (m_notify == NULL)
3020 		/* no space left */
3021 		return;
3022 	SCTP_BUF_LEN(m_notify) = 0;
3023 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3024 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3025 	pdapi->pdapi_flags = 0;
3026 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3027 	pdapi->pdapi_indication = error;
3028 	pdapi->pdapi_stream = (val >> 16);
3029 	pdapi->pdapi_seq = (val & 0x0000ffff);
3030 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3031 
3032 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3033 	SCTP_BUF_NEXT(m_notify) = NULL;
3034 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3035 	    0, 0, stcb->asoc.context, 0, 0, 0,
3036 	    m_notify);
3037 	if (control == NULL) {
3038 		/* no memory */
3039 		sctp_m_freem(m_notify);
3040 		return;
3041 	}
3042 	control->spec_flags = M_NOTIFICATION;
3043 	control->length = SCTP_BUF_LEN(m_notify);
3044 	/* not that we need this */
3045 	control->tail_mbuf = m_notify;
3046 	control->held_length = 0;
3047 	control->length = 0;
3048 	sb = &stcb->sctp_socket->so_rcv;
3049 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3050 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3051 	}
3052 	sctp_sballoc(stcb, sb, m_notify);
3053 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3054 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3055 	}
3056 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3057 	control->end_added = 1;
3058 	if (stcb->asoc.control_pdapi)
3059 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3060 	else {
3061 		/* we really should not see this case */
3062 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3063 	}
3064 	if (stcb->sctp_ep && stcb->sctp_socket) {
3065 		/* This should always be the case */
3066 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3067 		struct socket *so;
3068 
3069 		so = SCTP_INP_SO(stcb->sctp_ep);
3070 		if (!so_locked) {
3071 			atomic_add_int(&stcb->asoc.refcnt, 1);
3072 			SCTP_TCB_UNLOCK(stcb);
3073 			SCTP_SOCKET_LOCK(so, 1);
3074 			SCTP_TCB_LOCK(stcb);
3075 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3076 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3077 				SCTP_SOCKET_UNLOCK(so, 1);
3078 				return;
3079 			}
3080 		}
3081 #endif
3082 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3083 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3084 		if (!so_locked) {
3085 			SCTP_SOCKET_UNLOCK(so, 1);
3086 		}
3087 #endif
3088 	}
3089 }
3090 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification to the application.  For
 * 1-to-1 style (TCP model) sockets the socket is additionally marked as
 * unable to send before the event is queued.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock-order dance: hold a refcount on the association,
		 * drop the TCB lock, take the socket lock, then retake the
		 * TCB lock and recheck that the socket did not close while
		 * we were unlocked.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3158 
3159 static void
3160 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3161     int so_locked
3162 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3163     SCTP_UNUSED
3164 #endif
3165 )
3166 {
3167 	struct mbuf *m_notify;
3168 	struct sctp_sender_dry_event *event;
3169 	struct sctp_queued_to_read *control;
3170 
3171 	if ((stcb == NULL) ||
3172 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3173 		/* event not enabled */
3174 		return;
3175 	}
3176 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3177 	if (m_notify == NULL) {
3178 		/* no space left */
3179 		return;
3180 	}
3181 	SCTP_BUF_LEN(m_notify) = 0;
3182 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3183 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3184 	event->sender_dry_flags = 0;
3185 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3186 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3187 
3188 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3189 	SCTP_BUF_NEXT(m_notify) = NULL;
3190 
3191 	/* append to socket */
3192 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3193 	    0, 0, stcb->asoc.context, 0, 0, 0,
3194 	    m_notify);
3195 	if (control == NULL) {
3196 		/* no memory */
3197 		sctp_m_freem(m_notify);
3198 		return;
3199 	}
3200 	control->length = SCTP_BUF_LEN(m_notify);
3201 	control->spec_flags = M_NOTIFICATION;
3202 	/* not that we need this */
3203 	control->tail_mbuf = m_notify;
3204 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3205 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3206 }
3207 
3208 
3209 static void
3210 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3211 {
3212 	struct mbuf *m_notify;
3213 	struct sctp_queued_to_read *control;
3214 	struct sctp_stream_reset_event *strreset;
3215 	int len;
3216 
3217 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3218 		/* event not enabled */
3219 		return;
3220 	}
3221 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3222 	if (m_notify == NULL)
3223 		/* no space left */
3224 		return;
3225 	SCTP_BUF_LEN(m_notify) = 0;
3226 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3227 	if (len > M_TRAILINGSPACE(m_notify)) {
3228 		/* never enough room */
3229 		sctp_m_freem(m_notify);
3230 		return;
3231 	}
3232 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3233 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3234 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3235 	strreset->strreset_length = len;
3236 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3237 	strreset->strreset_list[0] = number_entries;
3238 
3239 	SCTP_BUF_LEN(m_notify) = len;
3240 	SCTP_BUF_NEXT(m_notify) = NULL;
3241 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3242 		/* no space */
3243 		sctp_m_freem(m_notify);
3244 		return;
3245 	}
3246 	/* append to socket */
3247 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3248 	    0, 0, stcb->asoc.context, 0, 0, 0,
3249 	    m_notify);
3250 	if (control == NULL) {
3251 		/* no memory */
3252 		sctp_m_freem(m_notify);
3253 		return;
3254 	}
3255 	control->spec_flags = M_NOTIFICATION;
3256 	control->length = SCTP_BUF_LEN(m_notify);
3257 	/* not that we need this */
3258 	control->tail_mbuf = m_notify;
3259 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3260 	    control,
3261 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3262 }
3263 
3264 
3265 static void
3266 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3267     int number_entries, uint16_t * list, int flag)
3268 {
3269 	struct mbuf *m_notify;
3270 	struct sctp_queued_to_read *control;
3271 	struct sctp_stream_reset_event *strreset;
3272 	int len;
3273 
3274 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3275 		/* event not enabled */
3276 		return;
3277 	}
3278 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3279 	if (m_notify == NULL)
3280 		/* no space left */
3281 		return;
3282 	SCTP_BUF_LEN(m_notify) = 0;
3283 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3284 	if (len > M_TRAILINGSPACE(m_notify)) {
3285 		/* never enough room */
3286 		sctp_m_freem(m_notify);
3287 		return;
3288 	}
3289 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3290 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3291 	if (number_entries == 0) {
3292 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3293 	} else {
3294 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3295 	}
3296 	strreset->strreset_length = len;
3297 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3298 	if (number_entries) {
3299 		int i;
3300 
3301 		for (i = 0; i < number_entries; i++) {
3302 			strreset->strreset_list[i] = ntohs(list[i]);
3303 		}
3304 	}
3305 	SCTP_BUF_LEN(m_notify) = len;
3306 	SCTP_BUF_NEXT(m_notify) = NULL;
3307 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3308 		/* no space */
3309 		sctp_m_freem(m_notify);
3310 		return;
3311 	}
3312 	/* append to socket */
3313 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3314 	    0, 0, stcb->asoc.context, 0, 0, 0,
3315 	    m_notify);
3316 	if (control == NULL) {
3317 		/* no memory */
3318 		sctp_m_freem(m_notify);
3319 		return;
3320 	}
3321 	control->spec_flags = M_NOTIFICATION;
3322 	control->length = SCTP_BUF_LEN(m_notify);
3323 	/* not that we need this */
3324 	control->tail_mbuf = m_notify;
3325 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3326 	    control,
3327 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3328 }
3329 
3330 
/*
 * Central dispatcher for upper-layer (ULP) notifications.  Translates an
 * internal SCTP_NOTIFY_* code into the corresponding socket-API event and
 * hands it to the matching sctp_notify_*() helper.  The meaning of 'error'
 * and 'data' depends on the notification type (e.g. a net pointer for
 * interface events, a chunk pointer for send failures, an entry count for
 * stream-reset events).
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	/* nothing can be delivered once the receive side is shut down */
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		return;
	}
	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is only reported once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.peer_supports_auth == 0) {
			/* recurse to report the missing peer AUTH support */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			/* 'data' is the affected sctp_nets */
			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* 'data' is a stream-queue-pending entry that failed */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_DG_FAIL:
		/* 'data' is the transmit chunk that failed */
		sctp_notify_send_failed(stcb, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			/* 'data' packs stream (hi 16) and sequence (lo 16) */
			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_STRDATA_ERR:
		break;
	case SCTP_NOTIFY_ASSOC_ABORTED:
		/* an abort before the handshake finishes is CANT_STR_ASSOC */
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, so_locked);
		}
		break;
	case SCTP_NOTIFY_PEER_OPENED_STREAM:
		break;
	case SCTP_NOTIFY_STREAM_OPENED_OK:
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, so_locked);
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_HB_RESP:
		break;
	/* for stream-reset events 'error' carries the entry count */
	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_ADD_OK:
		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
		break;

	case SCTP_NOTIFY_STR_RESET_SEND:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SUCCESS:
		break;
	case SCTP_NOTIFY_ASCONF_FAILED:
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	/* for AUTH events, 'data' carries the key id as a small integer */
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}
3507 
/*
 * Flush every chunk still queued for sending on this association and
 * notify the ULP about each dropped message: first the sent queue
 * (DATAGRAM_SENT), then the pending send queue (DATAGRAM_UNSENT), and
 * finally each stream's output queue.  Takes the TCB send lock unless
 * the caller already holds it (holds_lock != 0).
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/*
			 * sctp_ulp_notify may consume chk->data; recheck
			 * before freeing to avoid a double free.
			 */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3601 
3602 void
3603 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3604 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3605     SCTP_UNUSED
3606 #endif
3607 )
3608 {
3609 	if (stcb == NULL) {
3610 		return;
3611 	}
3612 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3613 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3614 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3615 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3616 	}
3617 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3618 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3619 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3620 		return;
3621 	}
3622 	/* Tell them we lost the asoc */
3623 	sctp_report_all_outbound(stcb, 1, so_locked);
3624 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3625 }
3626 
/*
 * Abort an association in response to an inbound packet: notify the ULP
 * (when a TCB exists), send an ABORT to the peer using the peer's vtag,
 * and free the association.  With no TCB, an out-of-the-blue ABORT with
 * vtag 0 is sent instead.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Acquire the socket lock before freeing: hold a refcount,
		 * drop and retake the TCB lock to respect lock ordering.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* established associations also decrement the gauge */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3670 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the in/out TSN ring buffers kept when
 * SCTP_ASOCLOG_OF_TSNS is enabled.  Each log is a circular array; when
 * it has wrapped, the entries from the current index to the end are
 * printed first, then the entries from the start up to the index.
 *
 * NOTE(review): the body is guarded by "NOSIY_PRINTS", which looks like
 * a typo for NOISY_PRINTS — as written, the prints compile in only if
 * someone defines the misspelled macro.  Left unchanged since "fixing"
 * it would alter which code is compiled; confirm intent before changing.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* oldest entries: from the write index to the end */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* newest entries: from the start up to the write index */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3734 
/*
 * Locally abort an association: notify the ULP, send an ABORT chunk
 * (with the given operational error) to the peer, update statistics,
 * and free the association.  With a NULL TCB the endpoint itself is
 * freed if its socket is already gone.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last association gone: tear down the endpoint */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* established associations also decrement the gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * sctp_free_assoc() needs the socket lock here; take it with the
	 * usual refcount / unlock / lock / relock dance when the caller
	 * does not already hold it.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3797 
/*
 * Handle an "out of the blue" (OOTB) packet — one that matches no known
 * association.  Walks the chunks to decide the RFC 4960 8.4 response:
 * some chunk types suppress any reply, SHUTDOWN-ACK gets a
 * SHUTDOWN-COMPLETE, and everything else is answered with an ABORT
 * (subject to the sctp_blackhole sysctl).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			/* endpoint has no associations left: free it */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* remembered so the blackhole sysctl can suppress the ABORT */
			contains_init_chunk = 1;
			break;
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(m, sh, vrf_id, port);
			return;
		default:
			break;
		}
		/* advance to the next chunk (lengths are padded to 4 bytes) */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
	}
}
3858 
3859 /*
3860  * check the inbound datagram to make sure there is not an abort inside it,
3861  * if there is return 1, else return 0.
3862  */
3863 int
3864 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3865 {
3866 	struct sctp_chunkhdr *ch;
3867 	struct sctp_init_chunk *init_chk, chunk_buf;
3868 	int offset;
3869 	unsigned int chk_length;
3870 
3871 	offset = iphlen + sizeof(struct sctphdr);
3872 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3873 	    (uint8_t *) & chunk_buf);
3874 	while (ch != NULL) {
3875 		chk_length = ntohs(ch->chunk_length);
3876 		if (chk_length < sizeof(*ch)) {
3877 			/* packet is probably corrupt */
3878 			break;
3879 		}
3880 		/* we seem to be ok, is it an abort? */
3881 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
3882 			/* yep, tell them */
3883 			return (1);
3884 		}
3885 		if (ch->chunk_type == SCTP_INITIATION) {
3886 			/* need to update the Vtag */
3887 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
3888 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
3889 			if (init_chk != NULL) {
3890 				*vtagfill = ntohl(init_chk->init.initiate_tag);
3891 			}
3892 		}
3893 		/* Nope, move to the next chunk */
3894 		offset += SCTP_SIZE32(chk_length);
3895 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3896 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3897 	}
3898 	return (0);
3899 }
3900 
3901 /*
3902  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
3903  * set (i.e. it's 0) so, create this function to compare link local scopes
3904  */
3905 #ifdef INET6
3906 uint32_t
3907 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
3908 {
3909 	struct sockaddr_in6 a, b;
3910 
3911 	/* save copies */
3912 	a = *addr1;
3913 	b = *addr2;
3914 
3915 	if (a.sin6_scope_id == 0)
3916 		if (sa6_recoverscope(&a)) {
3917 			/* can't get scope, so can't match */
3918 			return (0);
3919 		}
3920 	if (b.sin6_scope_id == 0)
3921 		if (sa6_recoverscope(&b)) {
3922 			/* can't get scope, so can't match */
3923 			return (0);
3924 		}
3925 	if (a.sin6_scope_id != b.sin6_scope_id)
3926 		return (0);
3927 
3928 	return (1);
3929 }
3930 
3931 /*
3932  * returns a sockaddr_in6 with embedded scope recovered and removed
3933  */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				/*
				 * No explicit scope id: copy into *store
				 * and try to recover the embedded one, so
				 * the caller's addr is left untouched.
				 */
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/*
				 * Explicit scope id already set; strip any
				 * scope embedded in the address bytes.
				 * NOTE(review): this writes into the
				 * caller's addr in place (not the copy) --
				 * presumably intentional, but confirm.
				 */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
3954 
3955 #endif
3956 
3957 /*
3958  * are the two addresses the same?  currently a "scopeless" check returns: 1
3959  * if same, 0 if not
3960  */
3961 int
3962 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
3963 {
3964 
3965 	/* must be valid */
3966 	if (sa1 == NULL || sa2 == NULL)
3967 		return (0);
3968 
3969 	/* must be the same family */
3970 	if (sa1->sa_family != sa2->sa_family)
3971 		return (0);
3972 
3973 	switch (sa1->sa_family) {
3974 #ifdef INET6
3975 	case AF_INET6:
3976 		{
3977 			/* IPv6 addresses */
3978 			struct sockaddr_in6 *sin6_1, *sin6_2;
3979 
3980 			sin6_1 = (struct sockaddr_in6 *)sa1;
3981 			sin6_2 = (struct sockaddr_in6 *)sa2;
3982 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
3983 			    sin6_2));
3984 		}
3985 #endif
3986 #ifdef INET
3987 	case AF_INET:
3988 		{
3989 			/* IPv4 addresses */
3990 			struct sockaddr_in *sin_1, *sin_2;
3991 
3992 			sin_1 = (struct sockaddr_in *)sa1;
3993 			sin_2 = (struct sockaddr_in *)sa2;
3994 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
3995 		}
3996 #endif
3997 	default:
3998 		/* we don't do these... */
3999 		return (0);
4000 	}
4001 }
4002 
4003 void
4004 sctp_print_address(struct sockaddr *sa)
4005 {
4006 #ifdef INET6
4007 	char ip6buf[INET6_ADDRSTRLEN];
4008 
4009 	ip6buf[0] = 0;
4010 #endif
4011 
4012 	switch (sa->sa_family) {
4013 #ifdef INET6
4014 	case AF_INET6:
4015 		{
4016 			struct sockaddr_in6 *sin6;
4017 
4018 			sin6 = (struct sockaddr_in6 *)sa;
4019 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4020 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4021 			    ntohs(sin6->sin6_port),
4022 			    sin6->sin6_scope_id);
4023 			break;
4024 		}
4025 #endif
4026 #ifdef INET
4027 	case AF_INET:
4028 		{
4029 			struct sockaddr_in *sin;
4030 			unsigned char *p;
4031 
4032 			sin = (struct sockaddr_in *)sa;
4033 			p = (unsigned char *)&sin->sin_addr;
4034 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4035 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4036 			break;
4037 		}
4038 #endif
4039 	default:
4040 		SCTP_PRINTF("?\n");
4041 		break;
4042 	}
4043 }
4044 
4045 void
4046 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4047 {
4048 	switch (iph->ip_v) {
4049 #ifdef INET
4050 	case IPVERSION:
4051 		{
4052 			struct sockaddr_in lsa, fsa;
4053 
4054 			bzero(&lsa, sizeof(lsa));
4055 			lsa.sin_len = sizeof(lsa);
4056 			lsa.sin_family = AF_INET;
4057 			lsa.sin_addr = iph->ip_src;
4058 			lsa.sin_port = sh->src_port;
4059 			bzero(&fsa, sizeof(fsa));
4060 			fsa.sin_len = sizeof(fsa);
4061 			fsa.sin_family = AF_INET;
4062 			fsa.sin_addr = iph->ip_dst;
4063 			fsa.sin_port = sh->dest_port;
4064 			SCTP_PRINTF("src: ");
4065 			sctp_print_address((struct sockaddr *)&lsa);
4066 			SCTP_PRINTF("dest: ");
4067 			sctp_print_address((struct sockaddr *)&fsa);
4068 			break;
4069 		}
4070 #endif
4071 #ifdef INET6
4072 	case IPV6_VERSION >> 4:
4073 		{
4074 			struct ip6_hdr *ip6;
4075 			struct sockaddr_in6 lsa6, fsa6;
4076 
4077 			ip6 = (struct ip6_hdr *)iph;
4078 			bzero(&lsa6, sizeof(lsa6));
4079 			lsa6.sin6_len = sizeof(lsa6);
4080 			lsa6.sin6_family = AF_INET6;
4081 			lsa6.sin6_addr = ip6->ip6_src;
4082 			lsa6.sin6_port = sh->src_port;
4083 			bzero(&fsa6, sizeof(fsa6));
4084 			fsa6.sin6_len = sizeof(fsa6);
4085 			fsa6.sin6_family = AF_INET6;
4086 			fsa6.sin6_addr = ip6->ip6_dst;
4087 			fsa6.sin6_port = sh->dest_port;
4088 			SCTP_PRINTF("src: ");
4089 			sctp_print_address((struct sockaddr *)&lsa6);
4090 			SCTP_PRINTF("dest: ");
4091 			sctp_print_address((struct sockaddr *)&fsa6);
4092 			break;
4093 		}
4094 #endif
4095 	default:
4096 		/* TSNH */
4097 		break;
4098 	}
4099 }
4100 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * Go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp (peeloff/accept
	 * path).  Socket-buffer accounting is debited from the old
	 * socket's so_rcv and credited to the new one.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* Serialize against concurrent readers of the old socket buffer. */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* Return every moved mbuf's space to the old so_rcv. */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* Charge every moved mbuf's space to the new so_rcv. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4176 
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.  If the socket can no longer read, the control and its
	 * data are freed instead of queued.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader side is gone: drop the data instead of queuing. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* Notifications are not counted as received user messages. */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* Walk the chain: drop empty mbufs, account the rest against sb. */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		/* Message complete: mark so the reader sees EOR. */
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* Wake up anyone sleeping on the socket. */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Drop the TCB lock (holding a ref) to
				 * honor socket-before-TCB lock order.
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4302 
4303 
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 *
	 * Returns 0 on success, -1 if the control is missing, already
	 * complete, or m is empty.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* Reader side gone; silently drop the append. */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* Walk the chain: drop empty mbufs, account the rest. */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			/* PDAPI case: charge the socket buffer too. */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* Wake up anyone sleeping on the socket. */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Drop the TCB lock (holding a ref) to honor
			 * socket-before-TCB lock order.
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4453 
4454 
4455 
4456 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4457  *************ALTERNATE ROUTING CODE
4458  */
4459 
4460 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4461  *************ALTERNATE ROUTING CODE
4462  */
4463 
4464 struct mbuf *
4465 sctp_generate_invmanparam(int err)
4466 {
4467 	/* Return a MBUF with a invalid mandatory parameter */
4468 	struct mbuf *m;
4469 
4470 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4471 	if (m) {
4472 		struct sctp_paramhdr *ph;
4473 
4474 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4475 		ph = mtod(m, struct sctp_paramhdr *);
4476 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4477 		ph->param_type = htons(err);
4478 	}
4479 	return (m);
4480 }
4481 
4482 #ifdef SCTP_MBCNT_LOGGING
4483 void
4484 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4485     struct sctp_tmit_chunk *tp1, int chk_cnt)
4486 {
4487 	if (tp1->data == NULL) {
4488 		return;
4489 	}
4490 	asoc->chunks_on_out_queue -= chk_cnt;
4491 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4492 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4493 		    asoc->total_output_queue_size,
4494 		    tp1->book_size,
4495 		    0,
4496 		    tp1->mbcnt);
4497 	}
4498 	if (asoc->total_output_queue_size >= tp1->book_size) {
4499 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4500 	} else {
4501 		asoc->total_output_queue_size = 0;
4502 	}
4503 
4504 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4505 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4506 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4507 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4508 		} else {
4509 			stcb->sctp_socket->so_snd.sb_cc = 0;
4510 
4511 		}
4512 	}
4513 }
4514 
4515 #endif
4516 
4517 int
4518 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4519     int reason, int so_locked
4520 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4521     SCTP_UNUSED
4522 #endif
4523 )
4524 {
4525 	struct sctp_stream_out *strq;
4526 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4527 	struct sctp_stream_queue_pending *sp;
4528 	uint16_t stream = 0, seq = 0;
4529 	uint8_t foundeom = 0;
4530 	int ret_sz = 0;
4531 	int notdone;
4532 	int do_wakeup_routine = 0;
4533 
4534 	stream = tp1->rec.data.stream_number;
4535 	seq = tp1->rec.data.stream_seq;
4536 	do {
4537 		ret_sz += tp1->book_size;
4538 		if (tp1->data != NULL) {
4539 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4540 				sctp_flight_size_decrease(tp1);
4541 				sctp_total_flight_decrease(stcb, tp1);
4542 			}
4543 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4544 			stcb->asoc.peers_rwnd += tp1->send_size;
4545 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4546 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4547 			if (tp1->data) {
4548 				sctp_m_freem(tp1->data);
4549 				tp1->data = NULL;
4550 			}
4551 			do_wakeup_routine = 1;
4552 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4553 				stcb->asoc.sent_queue_cnt_removeable--;
4554 			}
4555 		}
4556 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4557 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4558 		    SCTP_DATA_NOT_FRAG) {
4559 			/* not frag'ed we ae done   */
4560 			notdone = 0;
4561 			foundeom = 1;
4562 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4563 			/* end of frag, we are done */
4564 			notdone = 0;
4565 			foundeom = 1;
4566 		} else {
4567 			/*
4568 			 * Its a begin or middle piece, we must mark all of
4569 			 * it
4570 			 */
4571 			notdone = 1;
4572 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4573 		}
4574 	} while (tp1 && notdone);
4575 	if (foundeom == 0) {
4576 		/*
4577 		 * The multi-part message was scattered across the send and
4578 		 * sent queue.
4579 		 */
4580 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4581 			if ((tp1->rec.data.stream_number != stream) ||
4582 			    (tp1->rec.data.stream_seq != seq)) {
4583 				break;
4584 			}
4585 			/*
4586 			 * save to chk in case we have some on stream out
4587 			 * queue. If so and we have an un-transmitted one we
4588 			 * don't have to fudge the TSN.
4589 			 */
4590 			chk = tp1;
4591 			ret_sz += tp1->book_size;
4592 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4593 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4594 			if (tp1->data) {
4595 				sctp_m_freem(tp1->data);
4596 				tp1->data = NULL;
4597 			}
4598 			/* No flight involved here book the size to 0 */
4599 			tp1->book_size = 0;
4600 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4601 				foundeom = 1;
4602 			}
4603 			do_wakeup_routine = 1;
4604 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4605 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4606 			/*
4607 			 * on to the sent queue so we can wait for it to be
4608 			 * passed by.
4609 			 */
4610 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4611 			    sctp_next);
4612 			stcb->asoc.send_queue_cnt--;
4613 			stcb->asoc.sent_queue_cnt++;
4614 		}
4615 	}
4616 	if (foundeom == 0) {
4617 		/*
4618 		 * Still no eom found. That means there is stuff left on the
4619 		 * stream out queue.. yuck.
4620 		 */
4621 		strq = &stcb->asoc.strmout[stream];
4622 		SCTP_TCB_SEND_LOCK(stcb);
4623 		TAILQ_FOREACH(sp, &strq->outqueue, next) {
4624 			/* FIXME: Shouldn't this be a serial number check? */
4625 			if (sp->strseq > seq) {
4626 				break;
4627 			}
4628 			/* Check if its our SEQ */
4629 			if (sp->strseq == seq) {
4630 				sp->discard_rest = 1;
4631 				/*
4632 				 * We may need to put a chunk on the queue
4633 				 * that holds the TSN that would have been
4634 				 * sent with the LAST bit.
4635 				 */
4636 				if (chk == NULL) {
4637 					/* Yep, we have to */
4638 					sctp_alloc_a_chunk(stcb, chk);
4639 					if (chk == NULL) {
4640 						/*
4641 						 * we are hosed. All we can
4642 						 * do is nothing.. which
4643 						 * will cause an abort if
4644 						 * the peer is paying
4645 						 * attention.
4646 						 */
4647 						goto oh_well;
4648 					}
4649 					memset(chk, 0, sizeof(*chk));
4650 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4651 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4652 					chk->asoc = &stcb->asoc;
4653 					chk->rec.data.stream_seq = sp->strseq;
4654 					chk->rec.data.stream_number = sp->stream;
4655 					chk->rec.data.payloadtype = sp->ppid;
4656 					chk->rec.data.context = sp->context;
4657 					chk->flags = sp->act_flags;
4658 					if (sp->net)
4659 						chk->whoTo = sp->net;
4660 					else
4661 						chk->whoTo = stcb->asoc.primary_destination;
4662 					atomic_add_int(&chk->whoTo->ref_count, 1);
4663 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4664 					stcb->asoc.pr_sctp_cnt++;
4665 					chk->pr_sctp_on = 1;
4666 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4667 					stcb->asoc.sent_queue_cnt++;
4668 					stcb->asoc.pr_sctp_cnt++;
4669 				} else {
4670 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4671 				}
4672 		oh_well:
4673 				if (sp->data) {
4674 					/*
4675 					 * Pull any data to free up the SB
4676 					 * and allow sender to "add more"
4677 					 * whilc we will throw away :-)
4678 					 */
4679 					sctp_free_spbufspace(stcb, &stcb->asoc,
4680 					    sp);
4681 					ret_sz += sp->length;
4682 					do_wakeup_routine = 1;
4683 					sp->some_taken = 1;
4684 					sctp_m_freem(sp->data);
4685 					sp->length = 0;
4686 					sp->data = NULL;
4687 					sp->tail_mbuf = NULL;
4688 				}
4689 				break;
4690 			}
4691 		}		/* End tailq_foreach */
4692 		SCTP_TCB_SEND_UNLOCK(stcb);
4693 	}
4694 	if (do_wakeup_routine) {
4695 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4696 		struct socket *so;
4697 
4698 		so = SCTP_INP_SO(stcb->sctp_ep);
4699 		if (!so_locked) {
4700 			atomic_add_int(&stcb->asoc.refcnt, 1);
4701 			SCTP_TCB_UNLOCK(stcb);
4702 			SCTP_SOCKET_LOCK(so, 1);
4703 			SCTP_TCB_LOCK(stcb);
4704 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4705 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4706 				/* assoc was freed while we were unlocked */
4707 				SCTP_SOCKET_UNLOCK(so, 1);
4708 				return (ret_sz);
4709 			}
4710 		}
4711 #endif
4712 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4713 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4714 		if (!so_locked) {
4715 			SCTP_SOCKET_UNLOCK(so, 1);
4716 		}
4717 #endif
4718 	}
4719 	return (ret_sz);
4720 }
4721 
4722 /*
4723  * checks to see if the given address, sa, is one that is currently known by
4724  * the kernel note: can't distinguish the same address on multiple interfaces
4725  * and doesn't handle multiple addresses with different zone/scope id's note:
4726  * ifa_ifwithaddr() compares the entire sockaddr struct
4727  */
4728 struct sctp_ifa *
4729 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4730     int holds_lock)
4731 {
4732 	struct sctp_laddr *laddr;
4733 
4734 	if (holds_lock == 0) {
4735 		SCTP_INP_RLOCK(inp);
4736 	}
4737 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4738 		if (laddr->ifa == NULL)
4739 			continue;
4740 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4741 			continue;
4742 #ifdef INET
4743 		if (addr->sa_family == AF_INET) {
4744 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4745 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4746 				/* found him. */
4747 				if (holds_lock == 0) {
4748 					SCTP_INP_RUNLOCK(inp);
4749 				}
4750 				return (laddr->ifa);
4751 				break;
4752 			}
4753 		}
4754 #endif
4755 #ifdef INET6
4756 		if (addr->sa_family == AF_INET6) {
4757 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4758 			    &laddr->ifa->address.sin6)) {
4759 				/* found him. */
4760 				if (holds_lock == 0) {
4761 					SCTP_INP_RUNLOCK(inp);
4762 				}
4763 				return (laddr->ifa);
4764 				break;
4765 			}
4766 		}
4767 #endif
4768 	}
4769 	if (holds_lock == 0) {
4770 		SCTP_INP_RUNLOCK(inp);
4771 	}
4772 	return (NULL);
4773 }
4774 
4775 uint32_t
4776 sctp_get_ifa_hash_val(struct sockaddr *addr)
4777 {
4778 	switch (addr->sa_family) {
4779 #ifdef INET
4780 	case AF_INET:
4781 		{
4782 			struct sockaddr_in *sin;
4783 
4784 			sin = (struct sockaddr_in *)addr;
4785 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4786 		}
4787 #endif
4788 #ifdef INET6
4789 	case INET6:
4790 		{
4791 			struct sockaddr_in6 *sin6;
4792 			uint32_t hash_of_addr;
4793 
4794 			sin6 = (struct sockaddr_in6 *)addr;
4795 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4796 			    sin6->sin6_addr.s6_addr32[1] +
4797 			    sin6->sin6_addr.s6_addr32[2] +
4798 			    sin6->sin6_addr.s6_addr32[3]);
4799 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4800 			return (hash_of_addr);
4801 		}
4802 #endif
4803 	default:
4804 		break;
4805 	}
4806 	return (0);
4807 }
4808 
4809 struct sctp_ifa *
4810 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4811 {
4812 	struct sctp_ifa *sctp_ifap;
4813 	struct sctp_vrf *vrf;
4814 	struct sctp_ifalist *hash_head;
4815 	uint32_t hash_of_addr;
4816 
4817 	if (holds_lock == 0)
4818 		SCTP_IPI_ADDR_RLOCK();
4819 
4820 	vrf = sctp_find_vrf(vrf_id);
4821 	if (vrf == NULL) {
4822 stage_right:
4823 		if (holds_lock == 0)
4824 			SCTP_IPI_ADDR_RUNLOCK();
4825 		return (NULL);
4826 	}
4827 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4828 
4829 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4830 	if (hash_head == NULL) {
4831 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4832 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4833 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4834 		sctp_print_address(addr);
4835 		SCTP_PRINTF("No such bucket for address\n");
4836 		if (holds_lock == 0)
4837 			SCTP_IPI_ADDR_RUNLOCK();
4838 
4839 		return (NULL);
4840 	}
4841 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4842 		if (sctp_ifap == NULL) {
4843 #ifdef INVARIANTS
4844 			panic("Huh LIST_FOREACH corrupt");
4845 			goto stage_right;
4846 #else
4847 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4848 			goto stage_right;
4849 #endif
4850 		}
4851 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4852 			continue;
4853 #ifdef INET
4854 		if (addr->sa_family == AF_INET) {
4855 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4856 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4857 				/* found him. */
4858 				if (holds_lock == 0)
4859 					SCTP_IPI_ADDR_RUNLOCK();
4860 				return (sctp_ifap);
4861 				break;
4862 			}
4863 		}
4864 #endif
4865 #ifdef INET6
4866 		if (addr->sa_family == AF_INET6) {
4867 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4868 			    &sctp_ifap->address.sin6)) {
4869 				/* found him. */
4870 				if (holds_lock == 0)
4871 					SCTP_IPI_ADDR_RUNLOCK();
4872 				return (sctp_ifap);
4873 				break;
4874 			}
4875 		}
4876 #endif
4877 	}
4878 	if (holds_lock == 0)
4879 		SCTP_IPI_ADDR_RUNLOCK();
4880 	return (NULL);
4881 }
4882 
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/*
	 * User pulled some data, do we need a rwnd update?  If the window
	 * has opened by at least rwnd_req bytes since the last report,
	 * send a window-update SACK; otherwise just remember how much has
	 * been freed.  *freed_so_far is consumed (reset to 0).
	 */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the assoc can't vanish underneath us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window grew enough: report it to the peer via a SACK. */
		if (hold_rlock) {
			/* Drop the read lock before taking the TCB lock. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-acquire the read lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
4964 
/*
 * sctp_sorecvmsg() is the SCTP-specific receive path invoked on behalf of
 * soreceive().  It drains struct sctp_queued_to_read entries from
 * inp->read_queue and either copies the data to userland through 'uio'
 * (mp == NULL) or hands the raw mbuf chain back via '*mp'.
 *
 * Parameters:
 *   so            - socket being read.
 *   uio           - userland I/O descriptor; uio_resid bounds the copy.
 *   mp            - if non-NULL, return the mbuf chain instead of copying
 *                   through uio (not allowed together with MSG_PEEK).
 *   from/fromlen  - optional buffer for the peer's address.
 *   msg_flags     - in: MSG_PEEK/MSG_DONTWAIT/MSG_NBIO...; out: may set
 *                   MSG_EOR, MSG_NOTIFICATION and MSG_TRUNC.
 *   sinfo         - optional sctp_sndrcvinfo, filled when filling_sinfo.
 *   filling_sinfo - non-zero if sinfo (and extended rcvinfo) is wanted.
 *
 * Returns 0 on success (0 with no data indicates EOF), else an errno.
 *
 * Locking: three locks are juggled, each tracked by a local flag so every
 * exit path can release exactly what is held:
 *   sockbuf_lock - sblock() on so->so_rcv, serializes concurrent readers;
 *   hold_sblock  - SOCKBUF_LOCK, protects sb_cc and is required by sbwait();
 *   hold_rlock   - SCTP_INP_READ_LOCK, protects inp->read_queue.
 * In addition, freecnt_applied notes a refcount taken on the stcb so the
 * association cannot be freed out from under us while we sleep.
 */
int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
	 * On the way out we may send out any combination of:
	 * MSG_NOTIFICATION MSG_EOR
	 *
	 */
	struct sctp_inpcb *inp = NULL;
	int my_len = 0;
	int cp_len = 0, error = 0;
	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
	struct mbuf *m = NULL;
	struct sctp_tcb *stcb = NULL;
	int wakeup_read_socket = 0;
	int freecnt_applied = 0;	/* set once we hold a ref on stcb */
	int out_flags = 0, in_flags = 0;
	int block_allowed = 1;
	uint32_t freed_so_far = 0;	/* bytes freed; drives rwnd updates */
	uint32_t copied_so_far = 0;	/* bytes handed to the user */
	int in_eeor_mode = 0;
	int no_rcv_needed = 0;
	uint32_t rwnd_req = 0;		/* threshold for a window-update SACK */
	int hold_sblock = 0;
	int hold_rlock = 0;
	int slen = 0;
	uint32_t held_length = 0;
	int sockbuf_lock = 0;

	if (uio == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if (msg_flags) {
		in_flags = *msg_flags;
		if (in_flags & MSG_PEEK)
			SCTP_STAT_INCR(sctps_read_peeks);
	} else {
		in_flags = 0;
	}
	slen = uio->uio_resid;

	/* Pull in and set up our int flags */
	if (in_flags & MSG_OOB) {
		/* Out of band's NOT supported */
		return (EOPNOTSUPP);
	}
	/* MSG_PEEK is incompatible with returning the raw mbuf chain. */
	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((in_flags & (MSG_DONTWAIT
	    | MSG_NBIO
	    )) ||
	    SCTP_SO_IS_NBIO(so)) {
		block_allowed = 0;
	}
	/* setup the endpoint */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
		return (EFAULT);
	}
	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
	/* Must be at least a MTU's worth */
	if (rwnd_req < SCTP_MIN_RWND)
		rwnd_req = SCTP_MIN_RWND;
	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTER,
		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTERPL,
		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
	}
	/* Serialize with other readers; held until release/release_unlocked. */
	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
	sockbuf_lock = 1;
	if (error) {
		goto release_unlocked;
	}
	/*
	 * restart: re-scan the read queue from scratch (after a sleep or
	 * after discarding an empty/stale control).  restart_nosblocks is
	 * the same but assumes the SOCKBUF_LOCK state is already tracked.
	 */
restart:


restart_nosblocks:
	if (hold_sblock == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		hold_sblock = 1;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto out;
		} else {
			if (so->so_rcv.sb_cc == 0) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
				/* indicate EOF */
				error = 0;
				goto out;
			}
		}
	}
	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
		/* we need to wait for data */
		if ((so->so_rcv.sb_cc == 0) &&
		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/*
				 * For active open side clear flags for
				 * re-use passive open is blocked by
				 * connect.
				 */
				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
					/*
					 * You were aborted, passive side
					 * always hits here
					 */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
					error = ECONNRESET;
				}
				so->so_state &= ~(SS_ISCONNECTING |
				    SS_ISDISCONNECTING |
				    SS_ISCONFIRMING |
				    SS_ISCONNECTED);
				if (error == 0) {
					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
						error = ENOTCONN;
					}
				}
				goto out;
			}
		}
		/* Sleep until the sockbuf is signalled; needs SOCKBUF_LOCK. */
		error = sbwait(&so->so_rcv);
		if (error) {
			goto out;
		}
		held_length = 0;
		goto restart_nosblocks;
	} else if (so->so_rcv.sb_cc == 0) {
		/* Non-blocking socket with nothing queued. */
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
		} else {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
					/*
					 * For active open side clear flags
					 * for re-use passive open is
					 * blocked by connect.
					 */
					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
						/*
						 * You were aborted, passive
						 * side always hits here
						 */
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
						error = ECONNRESET;
					}
					so->so_state &= ~(SS_ISCONNECTING |
					    SS_ISDISCONNECTING |
					    SS_ISCONFIRMING |
					    SS_ISCONNECTED);
					if (error == 0) {
						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
							error = ENOTCONN;
						}
					}
					goto out;
				}
			}
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
			error = EWOULDBLOCK;
		}
		goto out;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* we possibly have data we can read */
	/* sa_ignore FREED_MEMORY */
	control = TAILQ_FIRST(&inp->read_queue);
	if (control == NULL) {
		/*
		 * This could be happening since the appender did the
		 * increment but as not yet did the tailq insert onto the
		 * read_queue
		 */
		if (hold_rlock == 0) {
			SCTP_INP_READ_LOCK(inp);
		}
		control = TAILQ_FIRST(&inp->read_queue);
		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
#ifdef INVARIANTS
			panic("Huh, its non zero and nothing on control?");
#endif
			so->so_rcv.sb_cc = 0;
		}
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->do_not_ref_stcb)) {
		/*
		 * Clean up code for freeing assoc that left behind a
		 * pdapi.. maybe a peer in EEOR that just closed after
		 * sending and never indicated a EOR.
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		control->held_length = 0;
		if (control->data) {
			/* Hmm there is data here .. fix */
			struct mbuf *m_tmp;
			int cnt = 0;

			/* Recount the chain length and mark it complete. */
			m_tmp = control->data;
			while (m_tmp) {
				cnt += SCTP_BUF_LEN(m_tmp);
				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
					control->tail_mbuf = m_tmp;
					control->end_added = 1;
				}
				m_tmp = SCTP_BUF_NEXT(m_tmp);
			}
			control->length = cnt;
		} else {
			/* remove it */
			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hiddend data */
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->end_added == 1)) {
		/*
		 * Do we also need to check for (control->pdapi_aborted ==
		 * 1)?
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		TAILQ_REMOVE(&inp->read_queue, control, next);
		if (control->data) {
#ifdef INVARIANTS
			panic("control->data not null but control->length == 0");
#else
			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
			sctp_m_freem(control->data);
			control->data = NULL;
#endif
		}
		if (control->aux_data) {
			sctp_m_free(control->aux_data);
			control->aux_data = NULL;
		}
		sctp_free_remote_addr(control->whoFrom);
		sctp_free_a_readq(stcb, control);
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if (control->length == 0) {
		/*
		 * The head entry has no data yet; with fragment interleave
		 * on, look further down the queue for a deliverable entry.
		 */
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
		    (filling_sinfo)) {
			/* find a more suitable one then this */
			ctl = TAILQ_NEXT(control, next);
			while (ctl) {
				if ((ctl->stcb != control->stcb) && (ctl->length) &&
				    (ctl->some_taken ||
				    (ctl->spec_flags & M_NOTIFICATION) ||
				    ((ctl->do_not_ref_stcb == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present. If we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery as started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered..
					 */
					control = ctl;
					goto found_one;
				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
					    (ctl->length) &&
					    ((ctl->some_taken) ||
					    ((ctl->do_not_ref_stcb == 0) &&
					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to tht tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notificaiton on the same assoc to
					 * be delivered.
					 */
					control = ctl;
					goto found_one;
				}
				ctl = TAILQ_NEXT(ctl, next);
			}
		}
		/*
		 * if we reach here, not suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into the our held count, and its time to sleep again.
		 */
		held_length = so->so_rcv.sb_cc;
		control->held_length = so->so_rcv.sb_cc;
		goto restart;
	}
	/* Clear the held length since there is something to read */
	control->held_length = 0;
	if (hold_rlock) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
found_one:
	/*
	 * If we reach here, control has a some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
	control->some_taken++;
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	stcb = control->stcb;
	if (stcb) {
		if ((control->do_not_ref_stcb == 0) &&
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			if (freecnt_applied == 0)
				stcb = NULL;
		} else if (control->do_not_ref_stcb == 0) {
			/* you can't free it on me please */
			/*
			 * The lock on the socket buffer protects us so the
			 * free code will stop. But since we used the
			 * socketbuf lock and the sender uses the tcb_lock
			 * to increment, we need to use the atomic add to
			 * the refcnt
			 */
			if (freecnt_applied) {
#ifdef INVARIANTS
				panic("refcnt already incremented");
#else
				printf("refcnt already incremented?\n");
#endif
			} else {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				freecnt_applied = 1;
			}
			/*
			 * Setup to remember how much we have not yet told
			 * the peer our rwnd has opened up. Note we grab the
			 * value from the tcb from last time. Note too that
			 * sack sending clears this when a sack is sent,
			 * which is fine. Once we hit the rwnd_req, we then
			 * will go to the sctp_user_rcvd() that will not
			 * lock until it KNOWs it MUST send a WUP-SACK.
			 */
			freed_so_far = stcb->freed_by_sorcv_sincelast;
			stcb->freed_by_sorcv_sincelast = 0;
		}
	}
	if (stcb &&
	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
	    control->do_not_ref_stcb == 0) {
		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
	}
	/* First lets get off the sinfo and sockaddr info */
	if ((sinfo) && filling_sinfo) {
		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
		nxt = TAILQ_NEXT(control, next);
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
			struct sctp_extrcvinfo *s_extra;

			/* Describe the NEXT queued message for the caller. */
			s_extra = (struct sctp_extrcvinfo *)sinfo;
			if ((nxt) &&
			    (nxt->length)) {
				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
				if (nxt->sinfo_flags & SCTP_UNORDERED) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
				}
				if (nxt->spec_flags & M_NOTIFICATION) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
				}
				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
				s_extra->sreinfo_next_length = nxt->length;
				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
				if (nxt->tail_mbuf != NULL) {
					if (nxt->end_added) {
						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
					}
				}
			} else {
				/*
				 * we explicitly 0 this, since the memcpy
				 * got some other things beyond the older
				 * sinfo_ that is on the control's structure
				 * :-D
				 */
				nxt = NULL;
				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
				s_extra->sreinfo_next_aid = 0;
				s_extra->sreinfo_next_length = 0;
				s_extra->sreinfo_next_ppid = 0;
				s_extra->sreinfo_next_stream = 0;
			}
		}
		/*
		 * update off the real current cum-ack, if we have an stcb.
		 */
		if ((control->do_not_ref_stcb == 0) && stcb)
			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
		/*
		 * mask off the high bits, we keep the actual chunk bits in
		 * there.
		 */
		sinfo->sinfo_flags &= 0x00ff;
		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
			sinfo->sinfo_flags |= SCTP_UNORDERED;
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	{
		int index, newindex;
		struct sctp_pcbtsn_rlog *entry;

		/* Claim a slot in the circular read log lock-free. */
		do {
			index = inp->readlog_index;
			newindex = index + 1;
			if (newindex >= SCTP_READ_LOG_SIZE) {
				newindex = 0;
			}
		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
		entry = &inp->readlog[index];
		entry->vtag = control->sinfo_assoc_id;
		entry->strm = control->sinfo_stream;
		entry->seq = control->sinfo_ssn;
		entry->sz = control->length;
		entry->flgs = control->sinfo_flags;
	}
#endif
	if (fromlen && from) {
		struct sockaddr *to;

#ifdef INET
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in *)from)->sin_port = control->port_from;
#else
		/* No AF_INET use AF_INET6 */
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
#endif

		to = from;
#if defined(INET) && defined(INET6)
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
		    (to->sa_family == AF_INET) &&
		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
			struct sockaddr_in *sin;
			struct sockaddr_in6 sin6;

			/* Rewrite the v4 address as an IPv4-mapped IPv6 one. */
			sin = (struct sockaddr_in *)to;
			bzero(&sin6, sizeof(sin6));
			sin6.sin6_family = AF_INET6;
			sin6.sin6_len = sizeof(struct sockaddr_in6);
			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
			bcopy(&sin->sin_addr,
			    &sin6.sin6_addr.s6_addr32[3],
			    sizeof(sin6.sin6_addr.s6_addr32[3]));
			sin6.sin6_port = sin->sin_port;
			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
		}
#endif
#if defined(INET6)
		{
			struct sockaddr_in6 lsa6, *to6;

			to6 = (struct sockaddr_in6 *)to;
			sctp_recover_scope_mac(to6, (&lsa6));
		}
#endif
	}
	/* now copy out what data we can */
	if (mp == NULL) {
		/* copy out each mbuf in the chain up to length */
get_more_data:
		m = control->data;
		while (m) {
			/* Move out all we can */
			cp_len = (int)uio->uio_resid;
			my_len = (int)SCTP_BUF_LEN(m);
			if (cp_len > my_len) {
				/* not enough in this buf */
				cp_len = my_len;
			}
			/* uiomove() may sleep; drop the read lock around it. */
			if (hold_rlock) {
				SCTP_INP_READ_UNLOCK(inp);
				hold_rlock = 0;
			}
			if (cp_len > 0)
				error = uiomove(mtod(m, char *), cp_len, uio);
			/* re-read */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				goto release;
			}
			if ((control->do_not_ref_stcb == 0) && stcb &&
			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				no_rcv_needed = 1;
			}
			if (error) {
				/* error we are out of here */
				goto release;
			}
			if ((SCTP_BUF_NEXT(m) == NULL) &&
			    (cp_len >= SCTP_BUF_LEN(m)) &&
			    ((control->end_added == 0) ||
			    (control->end_added &&
			    (TAILQ_NEXT(control, next) == NULL)))
			    ) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
			if (cp_len == SCTP_BUF_LEN(m)) {
				/* The whole mbuf was consumed. */
				if ((SCTP_BUF_NEXT(m) == NULL) &&
				    (control->end_added)) {
					out_flags |= MSG_EOR;
					if ((control->do_not_ref_stcb == 0) &&
					    (control->stcb != NULL) &&
					    ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				/* we ate up the mbuf */
				if (in_flags & MSG_PEEK) {
					/* just looking */
					m = SCTP_BUF_NEXT(m);
					copied_so_far += cp_len;
				} else {
					/* dispose of the mbuf */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
					}
					sctp_sbfree(control, stcb, &so->so_rcv, m);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
					}
					copied_so_far += cp_len;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					atomic_subtract_int(&control->length, cp_len);
					control->data = sctp_m_free(m);
					m = control->data;
					/*
					 * been through it all, must hold sb
					 * lock ok to null tail
					 */
					if (control->data == NULL) {
#ifdef INVARIANTS
						if ((control->end_added == 0) ||
						    (TAILQ_NEXT(control, next) == NULL)) {
							/*
							 * If the end is not
							 * added, OR the
							 * next is NOT null
							 * we MUST have the
							 * lock.
							 */
							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
								panic("Hmm we don't own the lock?");
							}
						}
#endif
						control->tail_mbuf = NULL;
#ifdef INVARIANTS
						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
							panic("end_added, nothing left and no MSG_EOR");
						}
#endif
					}
				}
			} else {
				/* Do we need to trim the mbuf? */
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				if ((in_flags & MSG_PEEK) == 0) {
					/* Partially consumed: trim the front. */
					SCTP_BUF_RESV_UF(m, cp_len);
					SCTP_BUF_LEN(m) -= cp_len;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
					}
					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
					if ((control->do_not_ref_stcb == 0) &&
					    stcb) {
						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
					}
					copied_so_far += cp_len;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
						    SCTP_LOG_SBRESULT, 0);
					}
					atomic_subtract_int(&control->length, cp_len);
				} else {
					copied_so_far += cp_len;
				}
			}
			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
				break;
			}
			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
			    (control->do_not_ref_stcb == 0) &&
			    (freed_so_far >= rwnd_req)) {
				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
			}
		}		/* end while(m) */
		/*
		 * At this point we have looked at it all and we either have
		 * a MSG_EOR/or read all the user wants... <OR>
		 * control->length == 0.
		 */
		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
			/* we are done with this control */
			if (control->length == 0) {
				if (control->data) {
#ifdef INVARIANTS
					panic("control->data not null at read eor?");
#else
					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
					sctp_m_freem(control->data);
					control->data = NULL;
#endif
				}
		done_with_control:
				/* Unlink and free the exhausted read entry. */
				if (TAILQ_NEXT(control, next) == NULL) {
					/*
					 * If we don't have a next we need a
					 * lock, if there is a next
					 * interrupt is filling ahead of us
					 * and we don't need a lock to
					 * remove this guy (which is the
					 * head of the queue).
					 */
					if (hold_rlock == 0) {
						SCTP_INP_READ_LOCK(inp);
						hold_rlock = 1;
					}
				}
				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hiddend data */
				if (control->held_length) {
					held_length = 0;
					control->held_length = 0;
					wakeup_read_socket = 1;
				}
				if (control->aux_data) {
					sctp_m_free(control->aux_data);
					control->aux_data = NULL;
				}
				no_rcv_needed = control->do_not_ref_stcb;
				sctp_free_remote_addr(control->whoFrom);
				control->data = NULL;
				sctp_free_a_readq(stcb, control);
				control = NULL;
				if ((freed_so_far >= rwnd_req) &&
				    (no_rcv_needed == 0))
					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);

			} else {
				/*
				 * The user did not read all of this
				 * message, turn off the returned MSG_EOR
				 * since we are leaving more behind on the
				 * control to read.
				 */
#ifdef INVARIANTS
				if (control->end_added &&
				    (control->data == NULL) &&
				    (control->tail_mbuf == NULL)) {
					panic("Gak, control->length is corrupt?");
				}
#endif
				no_rcv_needed = control->do_not_ref_stcb;
				out_flags &= ~MSG_EOR;
			}
		}
		if (out_flags & MSG_EOR) {
			goto release;
		}
		if ((uio->uio_resid == 0) ||
		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
		    ) {
			goto release;
		}
		/*
		 * If I hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions. Can we block? if not
		 * we are done. Did the user NOT set MSG_WAITALL?
		 */
		if (block_allowed == 0) {
			goto release;
		}
		/*
		 * We need to wait for more data a few things: - We don't
		 * sbunlock() so we don't get someone else reading. - We
		 * must be sure to account for the case where what is added
		 * is NOT to our control when we wakeup.
		 */

		/*
		 * Do we need to tell the transport a rwnd update might be
		 * needed before we go to sleep?
		 */
		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
		    ((freed_so_far >= rwnd_req) &&
		    (control->do_not_ref_stcb == 0) &&
		    (no_rcv_needed == 0))) {
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
		}
wait_some_more:
		/* Partial-delivery wait loop: sleep for more of this message. */
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			goto release;
		}
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
			goto release;

		if (hold_rlock == 1) {
			SCTP_INP_READ_UNLOCK(inp);
			hold_rlock = 0;
		}
		if (hold_sblock == 0) {
			SOCKBUF_LOCK(&so->so_rcv);
			hold_sblock = 1;
		}
		if ((copied_so_far) && (control->length == 0) &&
		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
			goto release;
		}
		if (so->so_rcv.sb_cc <= control->held_length) {
			error = sbwait(&so->so_rcv);
			if (error) {
				goto release;
			}
			control->held_length = 0;
		}
		if (hold_sblock) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			hold_sblock = 0;
		}
		if (control->length == 0) {
			/* still nothing here */
			if (control->end_added == 1) {
				/* he aborted, or is done i.e.did a shutdown */
				out_flags |= MSG_EOR;
				if (control->pdapi_aborted) {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;

					out_flags |= MSG_TRUNC;
				} else {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				goto done_with_control;
			}
			if (so->so_rcv.sb_cc > held_length) {
				control->held_length = so->so_rcv.sb_cc;
				held_length = 0;
			}
			goto wait_some_more;
		} else if (control->data == NULL) {
			/*
			 * we must re-sync since data is probably being
			 * added
			 */
			SCTP_INP_READ_LOCK(inp);
			if ((control->length > 0) && (control->data == NULL)) {
				/*
				 * big trouble.. we have the lock and its
				 * corrupt?
				 */
#ifdef INVARIANTS
				panic("Impossible data==NULL length !=0");
#endif
				out_flags |= MSG_EOR;
				out_flags |= MSG_TRUNC;
				control->length = 0;
				SCTP_INP_READ_UNLOCK(inp);
				goto done_with_control;
			}
			SCTP_INP_READ_UNLOCK(inp);
			/* We will fall around to get more data */
		}
		goto get_more_data;
	} else {
		/*-
		 * Give caller back the mbuf chain,
		 * store in uio_resid the length
		 */
		wakeup_read_socket = 0;
		if ((control->end_added == 0) ||
		    (TAILQ_NEXT(control, next) == NULL)) {
			/* Need to get rlock */
			if (hold_rlock == 0) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
		}
		if (control->end_added) {
			out_flags |= MSG_EOR;
			if ((control->do_not_ref_stcb == 0) &&
			    (control->stcb != NULL) &&
			    ((control->spec_flags & M_NOTIFICATION) == 0))
				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
		}
		if (control->spec_flags & M_NOTIFICATION) {
			out_flags |= MSG_NOTIFICATION;
		}
		uio->uio_resid = control->length;
		*mp = control->data;
		m = control->data;
		/* Account every mbuf against the sockbuf before handing off. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
			}
			sctp_sbfree(control, stcb, &so->so_rcv, m);
			freed_so_far += SCTP_BUF_LEN(m);
			freed_so_far += MSIZE;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control->data = control->tail_mbuf = NULL;
		control->length = 0;
		if (out_flags & MSG_EOR) {
			/* Done with this control */
			goto done_with_control;
		}
	}
release:
	/* Normal exit: drop whatever locks are still tracked as held. */
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	sbunlock(&so->so_rcv);
	sockbuf_lock = 0;

release_unlocked:
	/* Exit path for when sblock() itself failed or was already dropped. */
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
		if ((freed_so_far >= rwnd_req) &&
		    (control && (control->do_not_ref_stcb == 0)) &&
		    (no_rcv_needed == 0))
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
	}
out:
	/* Common tail: report flags, release locks/refs, log, wake readers. */
	if (msg_flags) {
		*msg_flags = out_flags;
	}
	if (((out_flags & MSG_EOR) == 0) &&
	    ((in_flags & MSG_PEEK) == 0) &&
	    (sinfo) &&
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
		struct sctp_extrcvinfo *s_extra;

		s_extra = (struct sctp_extrcvinfo *)sinfo;
		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
	}
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
	}
	if (sockbuf_lock) {
		sbunlock(&so->so_rcv);
	}
	if (freecnt_applied) {
		/*
		 * The lock on the socket buffer protects us so the free
		 * code will stop. But since we used the socketbuf lock and
		 * the sender uses the tcb_lock to increment, we need to use
		 * the atomic add to the refcnt.
		 */
		if (stcb == NULL) {
#ifdef INVARIANTS
			panic("stcb for refcnt has gone NULL?");
			goto stage_left;
#else
			goto stage_left;
#endif
		}
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
stage_left:
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}
5939 
5940 
5941 #ifdef SCTP_MBUF_LOGGING
5942 struct mbuf *
5943 sctp_m_free(struct mbuf *m)
5944 {
5945 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5946 		if (SCTP_BUF_IS_EXTENDED(m)) {
5947 			sctp_log_mb(m, SCTP_MBUF_IFREE);
5948 		}
5949 	}
5950 	return (m_free(m));
5951 }
5952 
5953 void
5954 sctp_m_freem(struct mbuf *mb)
5955 {
5956 	while (mb != NULL)
5957 		mb = sctp_m_free(mb);
5958 }
5959 
5960 #endif
5961 
5962 int
5963 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
5964 {
5965 	/*
5966 	 * Given a local address. For all associations that holds the
5967 	 * address, request a peer-set-primary.
5968 	 */
5969 	struct sctp_ifa *ifa;
5970 	struct sctp_laddr *wi;
5971 
5972 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
5973 	if (ifa == NULL) {
5974 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
5975 		return (EADDRNOTAVAIL);
5976 	}
5977 	/*
5978 	 * Now that we have the ifa we must awaken the iterator with this
5979 	 * message.
5980 	 */
5981 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
5982 	if (wi == NULL) {
5983 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
5984 		return (ENOMEM);
5985 	}
5986 	/* Now incr the count and int wi structure */
5987 	SCTP_INCR_LADDR_COUNT();
5988 	bzero(wi, sizeof(*wi));
5989 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
5990 	wi->ifa = ifa;
5991 	wi->action = SCTP_SET_PRIM_ADDR;
5992 	atomic_add_int(&ifa->refcount, 1);
5993 
5994 	/* Now add it to the work queue */
5995 	SCTP_WQ_ADDR_LOCK();
5996 	/*
5997 	 * Should this really be a tailq? As it is we will process the
5998 	 * newest first :-0
5999 	 */
6000 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6001 	SCTP_WQ_ADDR_UNLOCK();
6002 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6003 	    (struct sctp_inpcb *)NULL,
6004 	    (struct sctp_tcb *)NULL,
6005 	    (struct sctp_nets *)NULL);
6006 	return (0);
6007 }
6008 
6009 
6010 int
6011 sctp_soreceive(struct socket *so,
6012     struct sockaddr **psa,
6013     struct uio *uio,
6014     struct mbuf **mp0,
6015     struct mbuf **controlp,
6016     int *flagsp)
6017 {
6018 	int error, fromlen;
6019 	uint8_t sockbuf[256];
6020 	struct sockaddr *from;
6021 	struct sctp_extrcvinfo sinfo;
6022 	int filling_sinfo = 1;
6023 	struct sctp_inpcb *inp;
6024 
6025 	inp = (struct sctp_inpcb *)so->so_pcb;
6026 	/* pickup the assoc we are reading from */
6027 	if (inp == NULL) {
6028 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6029 		return (EINVAL);
6030 	}
6031 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6032 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6033 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6034 	    (controlp == NULL)) {
6035 		/* user does not want the sndrcv ctl */
6036 		filling_sinfo = 0;
6037 	}
6038 	if (psa) {
6039 		from = (struct sockaddr *)sockbuf;
6040 		fromlen = sizeof(sockbuf);
6041 		from->sa_len = 0;
6042 	} else {
6043 		from = NULL;
6044 		fromlen = 0;
6045 	}
6046 
6047 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6048 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6049 	if ((controlp) && (filling_sinfo)) {
6050 		/* copy back the sinfo in a CMSG format */
6051 		if (filling_sinfo)
6052 			*controlp = sctp_build_ctl_nchunk(inp,
6053 			    (struct sctp_sndrcvinfo *)&sinfo);
6054 		else
6055 			*controlp = NULL;
6056 	}
6057 	if (psa) {
6058 		/* copy back the address info */
6059 		if (from && from->sa_len) {
6060 			*psa = sodupsockaddr(from, M_NOWAIT);
6061 		} else {
6062 			*psa = NULL;
6063 		}
6064 	}
6065 	return (error);
6066 }
6067 
6068 
6069 
6070 
6071 
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add a packed list of totaddr sockaddrs (starting at addr) as
	 * remote addresses of the association stcb.  On any invalid address
	 * or allocation failure the association is freed, *error is set
	 * (EINVAL or ENOBUFS), and the walk stops.  Returns the number of
	 * addresses successfully added.
	 *
	 * NOTE: on the error paths stcb has been destroyed via
	 * sctp_free_assoc() — the caller must not touch it afterwards.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast peers */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast peers */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): for an unrecognized sa_family, incr
			 * keeps its previous value (0 on the first
			 * iteration), so sa may not advance — presumably
			 * callers pre-validate the families; confirm.
			 */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6147 
6148 struct sctp_tcb *
6149 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6150     int *totaddr, int *num_v4, int *num_v6, int *error,
6151     int limit, int *bad_addr)
6152 {
6153 	struct sockaddr *sa;
6154 	struct sctp_tcb *stcb = NULL;
6155 	size_t incr, at, i;
6156 
6157 	at = incr = 0;
6158 	sa = addr;
6159 
6160 	*error = *num_v6 = *num_v4 = 0;
6161 	/* account and validate addresses */
6162 	for (i = 0; i < (size_t)*totaddr; i++) {
6163 		switch (sa->sa_family) {
6164 #ifdef INET
6165 		case AF_INET:
6166 			(*num_v4) += 1;
6167 			incr = sizeof(struct sockaddr_in);
6168 			if (sa->sa_len != incr) {
6169 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6170 				*error = EINVAL;
6171 				*bad_addr = 1;
6172 				return (NULL);
6173 			}
6174 			break;
6175 #endif
6176 #ifdef INET6
6177 		case AF_INET6:
6178 			{
6179 				struct sockaddr_in6 *sin6;
6180 
6181 				sin6 = (struct sockaddr_in6 *)sa;
6182 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6183 					/* Must be non-mapped for connectx */
6184 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6185 					*error = EINVAL;
6186 					*bad_addr = 1;
6187 					return (NULL);
6188 				}
6189 				(*num_v6) += 1;
6190 				incr = sizeof(struct sockaddr_in6);
6191 				if (sa->sa_len != incr) {
6192 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6193 					*error = EINVAL;
6194 					*bad_addr = 1;
6195 					return (NULL);
6196 				}
6197 				break;
6198 			}
6199 #endif
6200 		default:
6201 			*totaddr = i;
6202 			/* we are done */
6203 			break;
6204 		}
6205 		if (i == (size_t)*totaddr) {
6206 			break;
6207 		}
6208 		SCTP_INP_INCR_REF(inp);
6209 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6210 		if (stcb != NULL) {
6211 			/* Already have or am bring up an association */
6212 			return (stcb);
6213 		} else {
6214 			SCTP_INP_DECR_REF(inp);
6215 		}
6216 		if ((at + incr) > (size_t)limit) {
6217 			*totaddr = i;
6218 			break;
6219 		}
6220 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6221 	}
6222 	return ((struct sctp_tcb *)NULL);
6223 }
6224 
6225 /*
6226  * sctp_bindx(ADD) for one address.
6227  * assumes all arguments are valid/checked by caller.
6228  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * Bind one additional address to a (subset-bound) endpoint.
	 * Validates the sockaddr length/family against the socket's
	 * binding mode, converts a v4-mapped v6 address to plain v4 where
	 * permitted, then either performs the initial bind (endpoint still
	 * unbound) or adds the address via sctp_addr_mgmt_ep_sa().
	 * On failure *error is set to an errno and the function returns.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;	/* scratch for v4-mapped -> v4 conversion */

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* treat the v4-mapped address as a plain v4 bind */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* endpoint not yet bound: this becomes the initial bind */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* is some other endpoint already bound to this address? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* free: add it (port cleared for the mgmt call) */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6352 
6353 /*
6354  * sctp_bindx(DELETE) for one address.
6355  * assumes all arguments are valid/checked by caller.
6356  */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * Remove one bound address from a (subset-bound) endpoint.
	 * Performs the same family/length validation as the ADD path,
	 * converts a v4-mapped v6 address to plain v4 where permitted,
	 * then deletes the address via sctp_addr_mgmt_ep_sa().
	 * On failure *error is set to an errno and the function returns.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;	/* scratch for v4-mapped -> v4 conversion */

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6)
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* treat the v4-mapped address as a plain v4 delete */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6437 
6438 /*
6439  * returns the valid local address count for an assoc, taking into account
6440  * all scoping rules
6441  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the local addresses usable by this association, applying
	 * the association's scoping rules (loopback, v4-private, v6
	 * link-local and site-local) and the endpoint's address-family
	 * restrictions.  Returns the count (0 if the VRF is unknown).
	 */
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/* derive legal families from the endpoint's binding mode */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link-local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6575 
6576 #if defined(SCTP_LOCAL_TRACE_BUF)
6577 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Append one entry to the global circular trace buffer.  A slot is
	 * reserved lock-free with a CAS loop so concurrent callers each get
	 * a distinct index; when the index reaches SCTP_MAX_LOGGING_SIZE it
	 * restarts at 1 and the wrapping caller writes slot 0.
	 */
	uint32_t saveindex, newindex;

	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* map the wrap-around reservation onto slot 0 */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6603 
6604 #endif
6605 /* XXX: Remove the #ifdef after tunneling over IPv6 works also on FreeBSD. */
6606 #ifdef INET
6607 /* We will need to add support
6608  * to bind the ports and such here
6609  * so we can do UDP tunneling. In
6610  * the mean-time, we return error
6611  */
6612 #include <netinet/udp.h>
6613 #include <netinet/udp_var.h>
6614 #include <sys/proc.h>
6615 #ifdef INET6
6616 #include <netinet6/sctp6_var.h>
6617 #endif
6618 
6619 static void
6620 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6621 {
6622 	struct ip *iph;
6623 	struct mbuf *sp, *last;
6624 	struct udphdr *uhdr;
6625 	uint16_t port = 0;
6626 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6627 
6628 	/*
6629 	 * Split out the mbuf chain. Leave the IP header in m, place the
6630 	 * rest in the sp.
6631 	 */
6632 	if ((m->m_flags & M_PKTHDR) == 0) {
6633 		/* Can't handle one that is not a pkt hdr */
6634 		goto out;
6635 	}
6636 	/* pull the src port */
6637 	iph = mtod(m, struct ip *);
6638 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6639 
6640 	port = uhdr->uh_sport;
6641 	sp = m_split(m, off, M_DONTWAIT);
6642 	if (sp == NULL) {
6643 		/* Gak, drop packet, we can't do a split */
6644 		goto out;
6645 	}
6646 	if (sp->m_pkthdr.len < header_size) {
6647 		/* Gak, packet can't have an SCTP header in it - to small */
6648 		m_freem(sp);
6649 		goto out;
6650 	}
6651 	/* ok now pull up the UDP header and SCTP header together */
6652 	sp = m_pullup(sp, header_size);
6653 	if (sp == NULL) {
6654 		/* Gak pullup failed */
6655 		goto out;
6656 	}
6657 	/* trim out the UDP header */
6658 	m_adj(sp, sizeof(struct udphdr));
6659 
6660 	/* Now reconstruct the mbuf chain */
6661 	/* 1) find last one */
6662 	last = m;
6663 	while (last->m_next != NULL) {
6664 		last = last->m_next;
6665 	}
6666 	last->m_next = sp;
6667 	m->m_pkthdr.len += sp->m_pkthdr.len;
6668 	last = m;
6669 	while (last != NULL) {
6670 		last = last->m_next;
6671 	}
6672 	/* Now its ready for sctp_input or sctp6_input */
6673 	iph = mtod(m, struct ip *);
6674 	switch (iph->ip_v) {
6675 #ifdef INET
6676 	case IPVERSION:
6677 		{
6678 			uint16_t len;
6679 
6680 			/* its IPv4 */
6681 			len = SCTP_GET_IPV4_LENGTH(iph);
6682 			len -= sizeof(struct udphdr);
6683 			SCTP_GET_IPV4_LENGTH(iph) = len;
6684 			sctp_input_with_port(m, off, port);
6685 			break;
6686 		}
6687 #endif
6688 #ifdef INET6
6689 	case IPV6_VERSION >> 4:
6690 		{
6691 			/* its IPv6 - NOT supported */
6692 			goto out;
6693 			break;
6694 
6695 		}
6696 #endif
6697 	default:
6698 		{
6699 			m_freem(m);
6700 			break;
6701 		}
6702 	}
6703 	return;
6704 out:
6705 	m_freem(m);
6706 }
6707 
6708 void
6709 sctp_over_udp_stop(void)
6710 {
6711 	struct socket *sop;
6712 
6713 	/*
6714 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6715 	 * for writting!
6716 	 */
6717 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6718 		/* Nothing to do */
6719 		return;
6720 	}
6721 	sop = SCTP_BASE_INFO(udp_tun_socket);
6722 	soclose(sop);
6723 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6724 }
6725 
int
sctp_over_udp_start(void)
{
	/*
	 * Create and bind the kernel UDP socket used for SCTP-over-UDP
	 * tunneling on the sysctl-configured port, and register
	 * sctp_recv_udp_tunneled_packet() as its input hook.  Returns 0 on
	 * success, EINVAL if no port is configured, EALREADY if already
	 * running, or the errno from socket creation/binding.
	 */
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up, we can't get the port */
exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_udp_tunneled_packet().
	 */
	return (0);
}
6779 
6780 #endif
6781