xref: /freebsd/sys/netinet/sctputil.c (revision 69c5bce6ee1ec42997757e7f1334767d217c5d7d)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *   this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *   the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #endif
45 #include <netinet/sctp_header.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_uio.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_bsd_addr.h>
53 
54 
55 #ifndef KTR_SCTP
56 #define KTR_SCTP KTR_SUBSYS
57 #endif
58 
59 extern struct sctp_cc_functions sctp_cc_functions[];
60 extern struct sctp_ss_functions sctp_ss_functions[];
61 
62 void
63 sctp_sblog(struct sockbuf *sb,
64     struct sctp_tcb *stcb, int from, int incr)
65 {
66 	struct sctp_cwnd_log sctp_clog;
67 
68 	sctp_clog.x.sb.stcb = stcb;
69 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
70 	if (stcb)
71 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
72 	else
73 		sctp_clog.x.sb.stcb_sbcc = 0;
74 	sctp_clog.x.sb.incr = incr;
75 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
76 	    SCTP_LOG_EVENT_SB,
77 	    from,
78 	    sctp_clog.x.misc.log1,
79 	    sctp_clog.x.misc.log2,
80 	    sctp_clog.x.misc.log3,
81 	    sctp_clog.x.misc.log4);
82 }
83 
84 void
85 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
86 {
87 	struct sctp_cwnd_log sctp_clog;
88 
89 	sctp_clog.x.close.inp = (void *)inp;
90 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
91 	if (stcb) {
92 		sctp_clog.x.close.stcb = (void *)stcb;
93 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
94 	} else {
95 		sctp_clog.x.close.stcb = 0;
96 		sctp_clog.x.close.state = 0;
97 	}
98 	sctp_clog.x.close.loc = loc;
99 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
100 	    SCTP_LOG_EVENT_CLOSE,
101 	    0,
102 	    sctp_clog.x.misc.log1,
103 	    sctp_clog.x.misc.log2,
104 	    sctp_clog.x.misc.log3,
105 	    sctp_clog.x.misc.log4);
106 }
107 
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 
125 }
126 
127 void
128 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
129 {
130 	struct sctp_cwnd_log sctp_clog;
131 
132 	sctp_clog.x.strlog.stcb = stcb;
133 	sctp_clog.x.strlog.n_tsn = tsn;
134 	sctp_clog.x.strlog.n_sseq = sseq;
135 	sctp_clog.x.strlog.e_tsn = 0;
136 	sctp_clog.x.strlog.e_sseq = 0;
137 	sctp_clog.x.strlog.strm = stream;
138 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
139 	    SCTP_LOG_EVENT_STRM,
140 	    from,
141 	    sctp_clog.x.misc.log1,
142 	    sctp_clog.x.misc.log2,
143 	    sctp_clog.x.misc.log3,
144 	    sctp_clog.x.misc.log4);
145 
146 }
147 
148 void
149 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
150 {
151 	struct sctp_cwnd_log sctp_clog;
152 
153 	sctp_clog.x.nagle.stcb = (void *)stcb;
154 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
155 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
156 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
157 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
158 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
159 	    SCTP_LOG_EVENT_NAGLE,
160 	    action,
161 	    sctp_clog.x.misc.log1,
162 	    sctp_clog.x.misc.log2,
163 	    sctp_clog.x.misc.log3,
164 	    sctp_clog.x.misc.log4);
165 }
166 
167 
168 void
169 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
170 {
171 	struct sctp_cwnd_log sctp_clog;
172 
173 	sctp_clog.x.sack.cumack = cumack;
174 	sctp_clog.x.sack.oldcumack = old_cumack;
175 	sctp_clog.x.sack.tsn = tsn;
176 	sctp_clog.x.sack.numGaps = gaps;
177 	sctp_clog.x.sack.numDups = dups;
178 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
179 	    SCTP_LOG_EVENT_SACK,
180 	    from,
181 	    sctp_clog.x.misc.log1,
182 	    sctp_clog.x.misc.log2,
183 	    sctp_clog.x.misc.log3,
184 	    sctp_clog.x.misc.log4);
185 }
186 
187 void
188 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
189 {
190 	struct sctp_cwnd_log sctp_clog;
191 
192 	memset(&sctp_clog, 0, sizeof(sctp_clog));
193 	sctp_clog.x.map.base = map;
194 	sctp_clog.x.map.cum = cum;
195 	sctp_clog.x.map.high = high;
196 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
197 	    SCTP_LOG_EVENT_MAP,
198 	    from,
199 	    sctp_clog.x.misc.log1,
200 	    sctp_clog.x.misc.log2,
201 	    sctp_clog.x.misc.log3,
202 	    sctp_clog.x.misc.log4);
203 }
204 
205 void
206 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
207     int from)
208 {
209 	struct sctp_cwnd_log sctp_clog;
210 
211 	memset(&sctp_clog, 0, sizeof(sctp_clog));
212 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
213 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
214 	sctp_clog.x.fr.tsn = tsn;
215 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
216 	    SCTP_LOG_EVENT_FR,
217 	    from,
218 	    sctp_clog.x.misc.log1,
219 	    sctp_clog.x.misc.log2,
220 	    sctp_clog.x.misc.log3,
221 	    sctp_clog.x.misc.log4);
222 
223 }
224 
225 
226 void
227 sctp_log_mb(struct mbuf *m, int from)
228 {
229 	struct sctp_cwnd_log sctp_clog;
230 
231 	sctp_clog.x.mb.mp = m;
232 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
233 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
234 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
235 	if (SCTP_BUF_IS_EXTENDED(m)) {
236 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
237 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
238 	} else {
239 		sctp_clog.x.mb.ext = 0;
240 		sctp_clog.x.mb.refcnt = 0;
241 	}
242 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
243 	    SCTP_LOG_EVENT_MBUF,
244 	    from,
245 	    sctp_clog.x.misc.log1,
246 	    sctp_clog.x.misc.log2,
247 	    sctp_clog.x.misc.log3,
248 	    sctp_clog.x.misc.log4);
249 }
250 
251 
252 void
253 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
254     int from)
255 {
256 	struct sctp_cwnd_log sctp_clog;
257 
258 	if (control == NULL) {
259 		SCTP_PRINTF("Gak log of NULL?\n");
260 		return;
261 	}
262 	sctp_clog.x.strlog.stcb = control->stcb;
263 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
264 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
265 	sctp_clog.x.strlog.strm = control->sinfo_stream;
266 	if (poschk != NULL) {
267 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
268 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
269 	} else {
270 		sctp_clog.x.strlog.e_tsn = 0;
271 		sctp_clog.x.strlog.e_sseq = 0;
272 	}
273 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
274 	    SCTP_LOG_EVENT_STRM,
275 	    from,
276 	    sctp_clog.x.misc.log1,
277 	    sctp_clog.x.misc.log2,
278 	    sctp_clog.x.misc.log3,
279 	    sctp_clog.x.misc.log4);
280 
281 }
282 
283 void
284 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
285 {
286 	struct sctp_cwnd_log sctp_clog;
287 
288 	sctp_clog.x.cwnd.net = net;
289 	if (stcb->asoc.send_queue_cnt > 255)
290 		sctp_clog.x.cwnd.cnt_in_send = 255;
291 	else
292 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
293 	if (stcb->asoc.stream_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_str = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
297 
298 	if (net) {
299 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
300 		sctp_clog.x.cwnd.inflight = net->flight_size;
301 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
302 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
303 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
304 	}
305 	if (SCTP_CWNDLOG_PRESEND == from) {
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
307 	}
308 	sctp_clog.x.cwnd.cwnd_augment = augment;
309 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
310 	    SCTP_LOG_EVENT_CWND,
311 	    from,
312 	    sctp_clog.x.misc.log1,
313 	    sctp_clog.x.misc.log2,
314 	    sctp_clog.x.misc.log3,
315 	    sctp_clog.x.misc.log4);
316 
317 }
318 
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	/*
	 * Snapshot which SCTP-related locks the current thread owns and
	 * trace them.  inp and/or stcb may be NULL; unavailable slots are
	 * reported as SCTP_LOCK_UNKNOWN.
	 */
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	/* Association (TCB) lock ownership. */
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Endpoint (INP) and association-create lock ownership. */
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Global endpoint-info rwlock: writer ownership only. */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both test
		 * so_rcv.sb_mtx; sock_lock presumably ought to test the
		 * socket's own lock — confirm before relying on it.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
363 
364 void
365 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
366 {
367 	struct sctp_cwnd_log sctp_clog;
368 
369 	memset(&sctp_clog, 0, sizeof(sctp_clog));
370 	sctp_clog.x.cwnd.net = net;
371 	sctp_clog.x.cwnd.cwnd_new_value = error;
372 	sctp_clog.x.cwnd.inflight = net->flight_size;
373 	sctp_clog.x.cwnd.cwnd_augment = burst;
374 	if (stcb->asoc.send_queue_cnt > 255)
375 		sctp_clog.x.cwnd.cnt_in_send = 255;
376 	else
377 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
378 	if (stcb->asoc.stream_queue_cnt > 255)
379 		sctp_clog.x.cwnd.cnt_in_str = 255;
380 	else
381 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
382 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
383 	    SCTP_LOG_EVENT_MAXBURST,
384 	    from,
385 	    sctp_clog.x.misc.log1,
386 	    sctp_clog.x.misc.log2,
387 	    sctp_clog.x.misc.log3,
388 	    sctp_clog.x.misc.log4);
389 
390 }
391 
392 void
393 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
394 {
395 	struct sctp_cwnd_log sctp_clog;
396 
397 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
398 	sctp_clog.x.rwnd.send_size = snd_size;
399 	sctp_clog.x.rwnd.overhead = overhead;
400 	sctp_clog.x.rwnd.new_rwnd = 0;
401 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
402 	    SCTP_LOG_EVENT_RWND,
403 	    from,
404 	    sctp_clog.x.misc.log1,
405 	    sctp_clog.x.misc.log2,
406 	    sctp_clog.x.misc.log3,
407 	    sctp_clog.x.misc.log4);
408 }
409 
410 void
411 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
412 {
413 	struct sctp_cwnd_log sctp_clog;
414 
415 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
416 	sctp_clog.x.rwnd.send_size = flight_size;
417 	sctp_clog.x.rwnd.overhead = overhead;
418 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
419 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
420 	    SCTP_LOG_EVENT_RWND,
421 	    from,
422 	    sctp_clog.x.misc.log1,
423 	    sctp_clog.x.misc.log2,
424 	    sctp_clog.x.misc.log3,
425 	    sctp_clog.x.misc.log4);
426 }
427 
428 void
429 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
430 {
431 	struct sctp_cwnd_log sctp_clog;
432 
433 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
434 	sctp_clog.x.mbcnt.size_change = book;
435 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
436 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_EVENT_MBCNT,
439 	    from,
440 	    sctp_clog.x.misc.log1,
441 	    sctp_clog.x.misc.log2,
442 	    sctp_clog.x.misc.log3,
443 	    sctp_clog.x.misc.log4);
444 
445 }
446 
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	/*
	 * Trace four caller-supplied 32-bit values as a generic "misc"
	 * event; 'from' identifies the call site.
	 */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
455 
456 void
457 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
458 {
459 	struct sctp_cwnd_log sctp_clog;
460 
461 	sctp_clog.x.wake.stcb = (void *)stcb;
462 	sctp_clog.x.wake.wake_cnt = wake_cnt;
463 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
464 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
465 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
466 
467 	if (stcb->asoc.stream_queue_cnt < 0xff)
468 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
469 	else
470 		sctp_clog.x.wake.stream_qcnt = 0xff;
471 
472 	if (stcb->asoc.chunks_on_out_queue < 0xff)
473 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
474 	else
475 		sctp_clog.x.wake.chunks_on_oque = 0xff;
476 
477 	sctp_clog.x.wake.sctpflags = 0;
478 	/* set in the defered mode stuff */
479 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
480 		sctp_clog.x.wake.sctpflags |= 1;
481 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
482 		sctp_clog.x.wake.sctpflags |= 2;
483 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
484 		sctp_clog.x.wake.sctpflags |= 4;
485 	/* what about the sb */
486 	if (stcb->sctp_socket) {
487 		struct socket *so = stcb->sctp_socket;
488 
489 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
490 	} else {
491 		sctp_clog.x.wake.sbflags = 0xff;
492 	}
493 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
494 	    SCTP_LOG_EVENT_WAKE,
495 	    from,
496 	    sctp_clog.x.misc.log1,
497 	    sctp_clog.x.misc.log2,
498 	    sctp_clog.x.misc.log3,
499 	    sctp_clog.x.misc.log4);
500 
501 }
502 
503 void
504 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
505 {
506 	struct sctp_cwnd_log sctp_clog;
507 
508 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
509 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
510 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
511 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
512 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
513 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
514 	sctp_clog.x.blk.sndlen = sendlen;
515 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
516 	    SCTP_LOG_EVENT_BLOCK,
517 	    from,
518 	    sctp_clog.x.misc.log1,
519 	    sctp_clog.x.misc.log2,
520 	    sctp_clog.x.misc.log3,
521 	    sctp_clog.x.misc.log4);
522 
523 }
524 
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/*
	 * Stub: the stat log is extracted out-of-band (e.g. with
	 * ktrdump), so this reports success without touching
	 * optval/optsize.
	 */
	/* May need to fix this if ktrdump does not work */
	return (0);
}
531 
532 #ifdef SCTP_AUDITING_ENABLED
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];	/* audit trail: ring buffer of (event, detail) byte pairs */
static int sctp_audit_indx = 0;	/* next write slot in sctp_audit_data (wraps at SCTP_AUDIT_SIZE) */
535 
536 static
537 void
538 sctp_print_audit_report(void)
539 {
540 	int i;
541 	int cnt;
542 
543 	cnt = 0;
544 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
545 		if ((sctp_audit_data[i][0] == 0xe0) &&
546 		    (sctp_audit_data[i][1] == 0x01)) {
547 			cnt = 0;
548 			SCTP_PRINTF("\n");
549 		} else if (sctp_audit_data[i][0] == 0xf0) {
550 			cnt = 0;
551 			SCTP_PRINTF("\n");
552 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
553 		    (sctp_audit_data[i][1] == 0x01)) {
554 			SCTP_PRINTF("\n");
555 			cnt = 0;
556 		}
557 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
558 		    (uint32_t) sctp_audit_data[i][1]);
559 		cnt++;
560 		if ((cnt % 14) == 0)
561 			SCTP_PRINTF("\n");
562 	}
563 	for (i = 0; i < sctp_audit_indx; i++) {
564 		if ((sctp_audit_data[i][0] == 0xe0) &&
565 		    (sctp_audit_data[i][1] == 0x01)) {
566 			cnt = 0;
567 			SCTP_PRINTF("\n");
568 		} else if (sctp_audit_data[i][0] == 0xf0) {
569 			cnt = 0;
570 			SCTP_PRINTF("\n");
571 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
572 		    (sctp_audit_data[i][1] == 0x01)) {
573 			SCTP_PRINTF("\n");
574 			cnt = 0;
575 		}
576 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
577 		    (uint32_t) sctp_audit_data[i][1]);
578 		cnt++;
579 		if ((cnt % 14) == 0)
580 			SCTP_PRINTF("\n");
581 	}
582 	SCTP_PRINTF("\n");
583 }
584 
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/*
	 * Consistency audit of the association's retransmit-count and
	 * flight-size bookkeeping: records progress markers in the audit
	 * ring, recomputes the counters from the sent queue, and both
	 * reports and repairs any mismatch found.  'from' tags the call
	 * site; 'net' is not used in the body here.
	 */
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA: audit entry marker tagged with the caller's id. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: no endpoint, nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: no association, nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the current retransmit count (low byte). */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	/* Recompute retransmit count and flight totals from the sent queue. */
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit-count mismatch; adopt recount. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total-flight byte count mismatch; repair. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: flight chunk-count mismatch; repair. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: per-destination flight sizes must sum to the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net sum mismatch; rebuild each net's count. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
714 
715 void
716 sctp_audit_log(uint8_t ev, uint8_t fd)
717 {
718 
719 	sctp_audit_data[sctp_audit_indx][0] = ev;
720 	sctp_audit_data[sctp_audit_indx][1] = fd;
721 	sctp_audit_indx++;
722 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
723 		sctp_audit_indx = 0;
724 	}
725 }
726 
727 #endif
728 
729 /*
730  * sctp_stop_timers_for_shutdown() should be called
731  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
732  * state to make sure that all timers are stopped.
733  */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	/* Association-wide timers. */
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	/* Per-destination path-MTU discovery and heartbeat timers. */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
	}
}
752 
753 /*
754  * a list of sizes based on typical mtu's, used only if next hop size not
755  * returned.
756  */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest MTU smaller than val. If there is no
 * entry, just return val.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	const uint32_t count = sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]);
	uint32_t idx;

	/* Below (or at) the smallest table entry: nothing to step down to. */
	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	/* Advance to the first entry that is >= val, then step back one. */
	idx = 1;
	while (idx < count && sctp_mtu_sizes[idx] < val) {
		idx++;
	}
	return (sctp_mtu_sizes[idx - 1]);
}
797 
798 /*
799  * Return the smallest MTU larger than val. If there is no
800  * entry, just return val.
801  */
802 uint32_t
803 sctp_get_next_mtu(struct sctp_inpcb *inp, uint32_t val)
804 {
805 	/* select another MTU that is just bigger than this one */
806 	uint32_t i;
807 
808 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
809 		if (val < sctp_mtu_sizes[i]) {
810 			return (sctp_mtu_sizes[i]);
811 		}
812 	}
813 	return (val);
814 }
815 
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	/* HMAC(random_numbers, counter) -> random_store; bump the counter. */
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
834 
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	/* Debug mode: hand out a simple incrementing sequence instead. */
	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/*
	 * Lock-free claim of a 4-byte slice of the random store:
	 * advance store_at with compare-and-swap, retrying on
	 * contention; wrap before the final SCTP_SIGNATURE_SIZE bytes.
	 */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/*
	 * NOTE(review): reads the claimed bytes through a uint32_t *;
	 * presumably random_store is suitably aligned for this — confirm.
	 */
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
872 
873 uint32_t
874 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
875 {
876 	uint32_t x, not_done;
877 	struct timeval now;
878 
879 	(void)SCTP_GETTIME_TIMEVAL(&now);
880 	not_done = 1;
881 	while (not_done) {
882 		x = sctp_select_initial_TSN(&inp->sctp_ep);
883 		if (x == 0) {
884 			/* we never use 0 */
885 			continue;
886 		}
887 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
888 			not_done = 0;
889 		}
890 	}
891 	return (x);
892 }
893 
894 int
895 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
896     uint32_t override_tag, uint32_t vrf_id)
897 {
898 	struct sctp_association *asoc;
899 
900 	/*
901 	 * Anything set to zero is taken care of by the allocation routine's
902 	 * bzero
903 	 */
904 
905 	/*
906 	 * Up front select what scoping to apply on addresses I tell my peer
907 	 * Not sure what to do with these right now, we will need to come up
908 	 * with a way to set them. We may need to pass them through from the
909 	 * caller in the sctp_aloc_assoc() function.
910 	 */
911 	int i;
912 
913 	asoc = &stcb->asoc;
914 	/* init all variables to a known value. */
915 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
916 	asoc->max_burst = m->sctp_ep.max_burst;
917 	asoc->fr_max_burst = m->sctp_ep.fr_max_burst;
918 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
919 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
920 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
921 	asoc->ecn_allowed = m->sctp_ecn_enable;
922 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
923 	asoc->sctp_cmt_pf = (uint8_t) 0;
924 	asoc->sctp_frag_point = m->sctp_frag_point;
925 	asoc->sctp_features = m->sctp_features;
926 	asoc->default_dscp = m->sctp_ep.default_dscp;
927 #ifdef INET6
928 	if (m->sctp_ep.default_flowlabel) {
929 		asoc->default_flowlabel = m->sctp_ep.default_flowlabel;
930 	} else {
931 		if (m->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
932 			asoc->default_flowlabel = sctp_select_initial_TSN(&m->sctp_ep);
933 			asoc->default_flowlabel &= 0x000fffff;
934 			asoc->default_flowlabel |= 0x80000000;
935 		} else {
936 			asoc->default_flowlabel = 0;
937 		}
938 	}
939 #endif
940 	asoc->sb_send_resv = 0;
941 	if (override_tag) {
942 		asoc->my_vtag = override_tag;
943 	} else {
944 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
945 	}
946 	/* Get the nonce tags */
947 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
948 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
949 	asoc->vrf_id = vrf_id;
950 
951 #ifdef SCTP_ASOCLOG_OF_TSNS
952 	asoc->tsn_in_at = 0;
953 	asoc->tsn_out_at = 0;
954 	asoc->tsn_in_wrapped = 0;
955 	asoc->tsn_out_wrapped = 0;
956 	asoc->cumack_log_at = 0;
957 	asoc->cumack_log_atsnt = 0;
958 #endif
959 #ifdef SCTP_FS_SPEC_LOG
960 	asoc->fs_index = 0;
961 #endif
962 	asoc->refcnt = 0;
963 	asoc->assoc_up_sent = 0;
964 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
965 	    sctp_select_initial_TSN(&m->sctp_ep);
966 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
967 	/* we are optimisitic here */
968 	asoc->peer_supports_pktdrop = 1;
969 	asoc->peer_supports_nat = 0;
970 	asoc->sent_queue_retran_cnt = 0;
971 
972 	/* for CMT */
973 	asoc->last_net_cmt_send_started = NULL;
974 
975 	/* This will need to be adjusted */
976 	asoc->last_acked_seq = asoc->init_seq_number - 1;
977 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
978 	asoc->asconf_seq_in = asoc->last_acked_seq;
979 
980 	/* here we are different, we hold the next one we expect */
981 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
982 
983 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
984 	asoc->initial_rto = m->sctp_ep.initial_rto;
985 
986 	asoc->max_init_times = m->sctp_ep.max_init_times;
987 	asoc->max_send_times = m->sctp_ep.max_send_times;
988 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
989 	asoc->def_net_pf_threshold = m->sctp_ep.def_net_pf_threshold;
990 	asoc->free_chunk_cnt = 0;
991 
992 	asoc->iam_blocking = 0;
993 
994 	asoc->context = m->sctp_context;
995 	asoc->def_send = m->def_send;
996 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
997 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
998 	asoc->pr_sctp_cnt = 0;
999 	asoc->total_output_queue_size = 0;
1000 
1001 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1002 		struct in6pcb *inp6;
1003 
1004 		/* Its a V6 socket */
1005 		inp6 = (struct in6pcb *)m;
1006 		asoc->ipv6_addr_legal = 1;
1007 		/* Now look at the binding flag to see if V4 will be legal */
1008 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1009 			asoc->ipv4_addr_legal = 1;
1010 		} else {
1011 			/* V4 addresses are NOT legal on the association */
1012 			asoc->ipv4_addr_legal = 0;
1013 		}
1014 	} else {
1015 		/* Its a V4 socket, no - V6 */
1016 		asoc->ipv4_addr_legal = 1;
1017 		asoc->ipv6_addr_legal = 0;
1018 	}
1019 
1020 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1021 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1022 
1023 	asoc->smallest_mtu = m->sctp_frag_point;
1024 	asoc->minrto = m->sctp_ep.sctp_minrto;
1025 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1026 
1027 	asoc->locked_on_sending = NULL;
1028 	asoc->stream_locked_on = 0;
1029 	asoc->ecn_echo_cnt_onq = 0;
1030 	asoc->stream_locked = 0;
1031 
1032 	asoc->send_sack = 1;
1033 
1034 	LIST_INIT(&asoc->sctp_restricted_addrs);
1035 
1036 	TAILQ_INIT(&asoc->nets);
1037 	TAILQ_INIT(&asoc->pending_reply_queue);
1038 	TAILQ_INIT(&asoc->asconf_ack_sent);
1039 	/* Setup to fill the hb random cache at first HB */
1040 	asoc->hb_random_idx = 4;
1041 
1042 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1043 
1044 	stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
1045 	stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
1046 
1047 	stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
1048 	stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
1049 
1050 	/*
1051 	 * Now the stream parameters, here we allocate space for all streams
1052 	 * that we request by default.
1053 	 */
1054 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1055 	    m->sctp_ep.pre_open_stream_count;
1056 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1057 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1058 	    SCTP_M_STRMO);
1059 	if (asoc->strmout == NULL) {
1060 		/* big trouble no memory */
1061 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1062 		return (ENOMEM);
1063 	}
1064 	for (i = 0; i < asoc->streamoutcnt; i++) {
1065 		/*
1066 		 * inbound side must be set to 0xffff, also NOTE when we get
1067 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1068 		 * count (streamoutcnt) but first check if we sent to any of
1069 		 * the upper streams that were dropped (if some were). Those
1070 		 * that were dropped must be notified to the upper layer as
1071 		 * failed to send.
1072 		 */
1073 		asoc->strmout[i].next_sequence_sent = 0x0;
1074 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1075 		asoc->strmout[i].stream_no = i;
1076 		asoc->strmout[i].last_msg_incomplete = 0;
1077 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1078 	}
1079 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1080 
1081 	/* Now the mapping array */
1082 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1083 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1084 	    SCTP_M_MAP);
1085 	if (asoc->mapping_array == NULL) {
1086 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1087 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1088 		return (ENOMEM);
1089 	}
1090 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1091 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1092 	    SCTP_M_MAP);
1093 	if (asoc->nr_mapping_array == NULL) {
1094 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1095 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1096 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1097 		return (ENOMEM);
1098 	}
1099 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1100 
1101 	/* Now the init of the other outqueues */
1102 	TAILQ_INIT(&asoc->free_chunks);
1103 	TAILQ_INIT(&asoc->control_send_queue);
1104 	TAILQ_INIT(&asoc->asconf_send_queue);
1105 	TAILQ_INIT(&asoc->send_queue);
1106 	TAILQ_INIT(&asoc->sent_queue);
1107 	TAILQ_INIT(&asoc->reasmqueue);
1108 	TAILQ_INIT(&asoc->resetHead);
1109 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1110 	TAILQ_INIT(&asoc->asconf_queue);
1111 	/* authentication fields */
1112 	asoc->authinfo.random = NULL;
1113 	asoc->authinfo.active_keyid = 0;
1114 	asoc->authinfo.assoc_key = NULL;
1115 	asoc->authinfo.assoc_keyid = 0;
1116 	asoc->authinfo.recv_key = NULL;
1117 	asoc->authinfo.recv_keyid = 0;
1118 	LIST_INIT(&asoc->shared_keys);
1119 	asoc->marked_retrans = 0;
1120 	asoc->timoinit = 0;
1121 	asoc->timodata = 0;
1122 	asoc->timosack = 0;
1123 	asoc->timoshutdown = 0;
1124 	asoc->timoheartbeat = 0;
1125 	asoc->timocookie = 0;
1126 	asoc->timoshutdownack = 0;
1127 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1128 	asoc->discontinuity_time = asoc->start_time;
1129 	/*
1130 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1131 	 * freed later when the association is freed.
1132 	 */
1133 	return (0);
1134 }
1135 
1136 void
1137 sctp_print_mapping_array(struct sctp_association *asoc)
1138 {
1139 	unsigned int i, limit;
1140 
1141 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1142 	    asoc->mapping_array_size,
1143 	    asoc->mapping_array_base_tsn,
1144 	    asoc->cumulative_tsn,
1145 	    asoc->highest_tsn_inside_map,
1146 	    asoc->highest_tsn_inside_nr_map);
1147 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1148 		if (asoc->mapping_array[limit - 1]) {
1149 			break;
1150 		}
1151 	}
1152 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1153 	for (i = 0; i < limit; i++) {
1154 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1155 	}
1156 	if (limit % 16)
1157 		printf("\n");
1158 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1159 		if (asoc->nr_mapping_array[limit - 1]) {
1160 			break;
1161 		}
1162 	}
1163 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1164 	for (i = 0; i < limit; i++) {
1165 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1166 	}
1167 	if (limit % 16)
1168 		printf("\n");
1169 }
1170 
1171 int
1172 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1173 {
1174 	/* mapping array needs to grow */
1175 	uint8_t *new_array1, *new_array2;
1176 	uint32_t new_size;
1177 
1178 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1179 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1180 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1181 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1182 		/* can't get more, forget it */
1183 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1184 		if (new_array1) {
1185 			SCTP_FREE(new_array1, SCTP_M_MAP);
1186 		}
1187 		if (new_array2) {
1188 			SCTP_FREE(new_array2, SCTP_M_MAP);
1189 		}
1190 		return (-1);
1191 	}
1192 	memset(new_array1, 0, new_size);
1193 	memset(new_array2, 0, new_size);
1194 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1195 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1196 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1197 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1198 	asoc->mapping_array = new_array1;
1199 	asoc->nr_mapping_array = new_array2;
1200 	asoc->mapping_array_size = new_size;
1201 	return (0);
1202 }
1203 
1204 
/*
 * Perform one iterator request: walk the endpoint list (or a single
 * endpoint, depending on SCTP_ITERATOR_DO_SINGLE_INP), invoking the
 * caller-supplied callbacks per-endpoint (function_inp /
 * function_inp_end) and per-association (function_assoc).  When the
 * walk is complete, function_atend is called and the iterator
 * structure itself is freed.  Entered without the INP-info or
 * iterator locks held; both are taken here.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* Drop the reference taken when the iterator was queued. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* The iterator owns itself; free it on completion. */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* On the first pass it->inp is already read-locked above. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/*
	 * Skip over endpoints whose flags/features do not match the
	 * iterator's filter.
	 */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* Fetch the next endpoint before dropping this one's lock. */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the stcb and the inp with refcounts so
			 * neither can be freed while every lock is
			 * dropped and reacquired below.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/*
			 * While the locks were down, someone may have
			 * asked to stop this iterator (or this
			 * endpoint) via the control flags.
			 */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				printf("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-lock and drop the temporary references. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1352 
/*
 * Drain the global iterator work queue, running each queued iterator
 * in its own vnet context.  The WQ lock is dropped around each
 * sctp_iterator_work() call and retaken afterwards, so new iterators
 * may be queued while one is running.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		/* NOTE: sctp_iterator_work() frees 'it' when it completes. */
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1376 
1377 
1378 static void
1379 sctp_handle_addr_wq(void)
1380 {
1381 	/* deal with the ADDR wq from the rtsock calls */
1382 	struct sctp_laddr *wi, *nwi;
1383 	struct sctp_asconf_iterator *asc;
1384 
1385 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1386 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1387 	if (asc == NULL) {
1388 		/* Try later, no memory */
1389 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1390 		    (struct sctp_inpcb *)NULL,
1391 		    (struct sctp_tcb *)NULL,
1392 		    (struct sctp_nets *)NULL);
1393 		return;
1394 	}
1395 	LIST_INIT(&asc->list_of_work);
1396 	asc->cnt = 0;
1397 
1398 	SCTP_WQ_ADDR_LOCK();
1399 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1400 		LIST_REMOVE(wi, sctp_nxt_addr);
1401 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1402 		asc->cnt++;
1403 	}
1404 	SCTP_WQ_ADDR_UNLOCK();
1405 
1406 	if (asc->cnt == 0) {
1407 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1408 	} else {
1409 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1410 		    sctp_asconf_iterator_stcb,
1411 		    NULL,	/* No ep end for boundall */
1412 		    SCTP_PCB_FLAGS_BOUNDALL,
1413 		    SCTP_PCB_ANY_FEATURES,
1414 		    SCTP_ASOC_ANY_STATE,
1415 		    (void *)asc, 0,
1416 		    sctp_asconf_iterator_end, NULL, 0);
1417 	}
1418 }
1419 
/*
 * Scratch variables used by sctp_timeout_handler() below.
 * NOTE(review): no state is carried between calls and they are not
 * declared static -- they look like candidates for function-local
 * variables; confirm no other compilation unit references them before
 * changing the linkage.
 */
int retcode = 0;
int cur_oerr = 0;
1422 
/*
 * Callout entry point for every SCTP timer.  't' is the struct
 * sctp_timer embedded in the owning object (endpoint, association or
 * net).  The function validates the timer, takes references/locks on
 * the inp and stcb as appropriate, dispatches on tmr->type, and
 * releases everything on the way out via the get_out / out_decr /
 * out_no_decr labels.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	/* A valid timer stores a self-pointer; anything else is stale. */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	/*
	 * tmr->stopped_from is breadcrumb state recording how far this
	 * handler got before bailing out (0xa001..0xa006 below).
	 */
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Every timer type except ADDR_WQ requires an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/*
	 * Save the type now; the final debug print below uses it after
	 * paths on which tmr's owner may already have been freed.
	 */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * If the socket is gone, only the timer types that must
		 * still run during teardown are allowed through.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association while we work on it. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			/* Association has no state left; drop out. */
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while this callout was in flight. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/*
		 * Lock the TCB and trade the refcount for the lock.  A
		 * dying association (ABOUT_TO_BE_FREED) is only serviced
		 * by the ASOCKILL timer.
		 */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* Re-arm only if HBs are still enabled on this path. */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/*
			 * Periodic rotation of the endpoint's cookie
			 * secret keys, then re-arm.
			 */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Shutdown took too long: abort the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* Common exit: release the TCB lock and the inp reference. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1865 
1866 void
1867 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1868     struct sctp_nets *net)
1869 {
1870 	uint32_t to_ticks;
1871 	struct sctp_timer *tmr;
1872 
1873 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1874 		return;
1875 
1876 	to_ticks = 0;
1877 
1878 	tmr = NULL;
1879 	if (stcb) {
1880 		SCTP_TCB_LOCK_ASSERT(stcb);
1881 	}
1882 	switch (t_type) {
1883 	case SCTP_TIMER_TYPE_ZERO_COPY:
1884 		tmr = &inp->sctp_ep.zero_copy_timer;
1885 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1886 		break;
1887 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1888 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1889 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1890 		break;
1891 	case SCTP_TIMER_TYPE_ADDR_WQ:
1892 		/* Only 1 tick away :-) */
1893 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1894 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1895 		break;
1896 	case SCTP_TIMER_TYPE_SEND:
1897 		/* Here we use the RTO timer */
1898 		{
1899 			int rto_val;
1900 
1901 			if ((stcb == NULL) || (net == NULL)) {
1902 				return;
1903 			}
1904 			tmr = &net->rxt_timer;
1905 			if (net->RTO == 0) {
1906 				rto_val = stcb->asoc.initial_rto;
1907 			} else {
1908 				rto_val = net->RTO;
1909 			}
1910 			to_ticks = MSEC_TO_TICKS(rto_val);
1911 		}
1912 		break;
1913 	case SCTP_TIMER_TYPE_INIT:
1914 		/*
1915 		 * Here we use the INIT timer default usually about 1
1916 		 * minute.
1917 		 */
1918 		if ((stcb == NULL) || (net == NULL)) {
1919 			return;
1920 		}
1921 		tmr = &net->rxt_timer;
1922 		if (net->RTO == 0) {
1923 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1924 		} else {
1925 			to_ticks = MSEC_TO_TICKS(net->RTO);
1926 		}
1927 		break;
1928 	case SCTP_TIMER_TYPE_RECV:
1929 		/*
1930 		 * Here we use the Delayed-Ack timer value from the inp
1931 		 * ususually about 200ms.
1932 		 */
1933 		if (stcb == NULL) {
1934 			return;
1935 		}
1936 		tmr = &stcb->asoc.dack_timer;
1937 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1938 		break;
1939 	case SCTP_TIMER_TYPE_SHUTDOWN:
1940 		/* Here we use the RTO of the destination. */
1941 		if ((stcb == NULL) || (net == NULL)) {
1942 			return;
1943 		}
1944 		if (net->RTO == 0) {
1945 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1946 		} else {
1947 			to_ticks = MSEC_TO_TICKS(net->RTO);
1948 		}
1949 		tmr = &net->rxt_timer;
1950 		break;
1951 	case SCTP_TIMER_TYPE_HEARTBEAT:
1952 		/*
1953 		 * the net is used here so that we can add in the RTO. Even
1954 		 * though we use a different timer. We also add the HB timer
1955 		 * PLUS a random jitter.
1956 		 */
1957 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1958 			return;
1959 		} else {
1960 			uint32_t rndval;
1961 			uint32_t jitter;
1962 
1963 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1964 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1965 				return;
1966 			}
1967 			if (net->RTO == 0) {
1968 				to_ticks = stcb->asoc.initial_rto;
1969 			} else {
1970 				to_ticks = net->RTO;
1971 			}
1972 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1973 			jitter = rndval % to_ticks;
1974 			if (jitter >= (to_ticks >> 1)) {
1975 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1976 			} else {
1977 				to_ticks = to_ticks - jitter;
1978 			}
1979 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1980 			    !(net->dest_state & SCTP_ADDR_PF)) {
1981 				to_ticks += net->heart_beat_delay;
1982 			}
1983 			/*
1984 			 * Now we must convert the to_ticks that are now in
1985 			 * ms to ticks.
1986 			 */
1987 			to_ticks = MSEC_TO_TICKS(to_ticks);
1988 			tmr = &net->hb_timer;
1989 		}
1990 		break;
1991 	case SCTP_TIMER_TYPE_COOKIE:
1992 		/*
1993 		 * Here we can use the RTO timer from the network since one
1994 		 * RTT was compelete. If a retran happened then we will be
1995 		 * using the RTO initial value.
1996 		 */
1997 		if ((stcb == NULL) || (net == NULL)) {
1998 			return;
1999 		}
2000 		if (net->RTO == 0) {
2001 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2002 		} else {
2003 			to_ticks = MSEC_TO_TICKS(net->RTO);
2004 		}
2005 		tmr = &net->rxt_timer;
2006 		break;
2007 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2008 		/*
2009 		 * nothing needed but the endpoint here ususually about 60
2010 		 * minutes.
2011 		 */
2012 		if (inp == NULL) {
2013 			return;
2014 		}
2015 		tmr = &inp->sctp_ep.signature_change;
2016 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2017 		break;
2018 	case SCTP_TIMER_TYPE_ASOCKILL:
2019 		if (stcb == NULL) {
2020 			return;
2021 		}
2022 		tmr = &stcb->asoc.strreset_timer;
2023 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2024 		break;
2025 	case SCTP_TIMER_TYPE_INPKILL:
2026 		/*
2027 		 * The inp is setup to die. We re-use the signature_chage
2028 		 * timer since that has stopped and we are in the GONE
2029 		 * state.
2030 		 */
2031 		if (inp == NULL) {
2032 			return;
2033 		}
2034 		tmr = &inp->sctp_ep.signature_change;
2035 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2036 		break;
2037 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2038 		/*
2039 		 * Here we use the value found in the EP for PMTU ususually
2040 		 * about 10 minutes.
2041 		 */
2042 		if ((stcb == NULL) || (inp == NULL)) {
2043 			return;
2044 		}
2045 		if (net == NULL) {
2046 			return;
2047 		}
2048 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2049 			return;
2050 		}
2051 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2052 		tmr = &net->pmtu_timer;
2053 		break;
2054 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2055 		/* Here we use the RTO of the destination */
2056 		if ((stcb == NULL) || (net == NULL)) {
2057 			return;
2058 		}
2059 		if (net->RTO == 0) {
2060 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2061 		} else {
2062 			to_ticks = MSEC_TO_TICKS(net->RTO);
2063 		}
2064 		tmr = &net->rxt_timer;
2065 		break;
2066 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2067 		/*
2068 		 * Here we use the endpoints shutdown guard timer usually
2069 		 * about 3 minutes.
2070 		 */
2071 		if ((inp == NULL) || (stcb == NULL)) {
2072 			return;
2073 		}
2074 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2075 		tmr = &stcb->asoc.shut_guard_timer;
2076 		break;
2077 	case SCTP_TIMER_TYPE_STRRESET:
2078 		/*
2079 		 * Here the timer comes from the stcb but its value is from
2080 		 * the net's RTO.
2081 		 */
2082 		if ((stcb == NULL) || (net == NULL)) {
2083 			return;
2084 		}
2085 		if (net->RTO == 0) {
2086 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2087 		} else {
2088 			to_ticks = MSEC_TO_TICKS(net->RTO);
2089 		}
2090 		tmr = &stcb->asoc.strreset_timer;
2091 		break;
2092 	case SCTP_TIMER_TYPE_ASCONF:
2093 		/*
2094 		 * Here the timer comes from the stcb but its value is from
2095 		 * the net's RTO.
2096 		 */
2097 		if ((stcb == NULL) || (net == NULL)) {
2098 			return;
2099 		}
2100 		if (net->RTO == 0) {
2101 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2102 		} else {
2103 			to_ticks = MSEC_TO_TICKS(net->RTO);
2104 		}
2105 		tmr = &stcb->asoc.asconf_timer;
2106 		break;
2107 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2108 		if ((stcb == NULL) || (net != NULL)) {
2109 			return;
2110 		}
2111 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2112 		tmr = &stcb->asoc.delete_prim_timer;
2113 		break;
2114 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2115 		if (stcb == NULL) {
2116 			return;
2117 		}
2118 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2119 			/*
2120 			 * Really an error since stcb is NOT set to
2121 			 * autoclose
2122 			 */
2123 			return;
2124 		}
2125 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2126 		tmr = &stcb->asoc.autoclose_timer;
2127 		break;
2128 	default:
2129 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2130 		    __FUNCTION__, t_type);
2131 		return;
2132 		break;
2133 	};
2134 	if ((to_ticks <= 0) || (tmr == NULL)) {
2135 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2136 		    __FUNCTION__, t_type, to_ticks, tmr);
2137 		return;
2138 	}
2139 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2140 		/*
2141 		 * we do NOT allow you to have it already running. if it is
2142 		 * we leave the current one up unchanged
2143 		 */
2144 		return;
2145 	}
2146 	/* At this point we can proceed */
2147 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2148 		stcb->asoc.num_send_timers_up++;
2149 	}
2150 	tmr->stopped_from = 0;
2151 	tmr->type = t_type;
2152 	tmr->ep = (void *)inp;
2153 	tmr->tcb = (void *)stcb;
2154 	tmr->net = (void *)net;
2155 	tmr->self = (void *)tmr;
2156 	tmr->vnet = (void *)curvnet;
2157 	tmr->ticks = sctp_get_tick_count();
2158 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2159 	return;
2160 }
2161 
2162 void
2163 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2164     struct sctp_nets *net, uint32_t from)
2165 {
2166 	struct sctp_timer *tmr;
2167 
2168 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2169 	    (inp == NULL))
2170 		return;
2171 
2172 	tmr = NULL;
2173 	if (stcb) {
2174 		SCTP_TCB_LOCK_ASSERT(stcb);
2175 	}
2176 	switch (t_type) {
2177 	case SCTP_TIMER_TYPE_ZERO_COPY:
2178 		tmr = &inp->sctp_ep.zero_copy_timer;
2179 		break;
2180 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2181 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2182 		break;
2183 	case SCTP_TIMER_TYPE_ADDR_WQ:
2184 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2185 		break;
2186 	case SCTP_TIMER_TYPE_SEND:
2187 		if ((stcb == NULL) || (net == NULL)) {
2188 			return;
2189 		}
2190 		tmr = &net->rxt_timer;
2191 		break;
2192 	case SCTP_TIMER_TYPE_INIT:
2193 		if ((stcb == NULL) || (net == NULL)) {
2194 			return;
2195 		}
2196 		tmr = &net->rxt_timer;
2197 		break;
2198 	case SCTP_TIMER_TYPE_RECV:
2199 		if (stcb == NULL) {
2200 			return;
2201 		}
2202 		tmr = &stcb->asoc.dack_timer;
2203 		break;
2204 	case SCTP_TIMER_TYPE_SHUTDOWN:
2205 		if ((stcb == NULL) || (net == NULL)) {
2206 			return;
2207 		}
2208 		tmr = &net->rxt_timer;
2209 		break;
2210 	case SCTP_TIMER_TYPE_HEARTBEAT:
2211 		if ((stcb == NULL) || (net == NULL)) {
2212 			return;
2213 		}
2214 		tmr = &net->hb_timer;
2215 		break;
2216 	case SCTP_TIMER_TYPE_COOKIE:
2217 		if ((stcb == NULL) || (net == NULL)) {
2218 			return;
2219 		}
2220 		tmr = &net->rxt_timer;
2221 		break;
2222 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2223 		/* nothing needed but the endpoint here */
2224 		tmr = &inp->sctp_ep.signature_change;
2225 		/*
2226 		 * We re-use the newcookie timer for the INP kill timer. We
2227 		 * must assure that we do not kill it by accident.
2228 		 */
2229 		break;
2230 	case SCTP_TIMER_TYPE_ASOCKILL:
2231 		/*
2232 		 * Stop the asoc kill timer.
2233 		 */
2234 		if (stcb == NULL) {
2235 			return;
2236 		}
2237 		tmr = &stcb->asoc.strreset_timer;
2238 		break;
2239 
2240 	case SCTP_TIMER_TYPE_INPKILL:
2241 		/*
2242 		 * The inp is setup to die. We re-use the signature_chage
2243 		 * timer since that has stopped and we are in the GONE
2244 		 * state.
2245 		 */
2246 		tmr = &inp->sctp_ep.signature_change;
2247 		break;
2248 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2249 		if ((stcb == NULL) || (net == NULL)) {
2250 			return;
2251 		}
2252 		tmr = &net->pmtu_timer;
2253 		break;
2254 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2255 		if ((stcb == NULL) || (net == NULL)) {
2256 			return;
2257 		}
2258 		tmr = &net->rxt_timer;
2259 		break;
2260 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2261 		if (stcb == NULL) {
2262 			return;
2263 		}
2264 		tmr = &stcb->asoc.shut_guard_timer;
2265 		break;
2266 	case SCTP_TIMER_TYPE_STRRESET:
2267 		if (stcb == NULL) {
2268 			return;
2269 		}
2270 		tmr = &stcb->asoc.strreset_timer;
2271 		break;
2272 	case SCTP_TIMER_TYPE_ASCONF:
2273 		if (stcb == NULL) {
2274 			return;
2275 		}
2276 		tmr = &stcb->asoc.asconf_timer;
2277 		break;
2278 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2279 		if (stcb == NULL) {
2280 			return;
2281 		}
2282 		tmr = &stcb->asoc.delete_prim_timer;
2283 		break;
2284 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2285 		if (stcb == NULL) {
2286 			return;
2287 		}
2288 		tmr = &stcb->asoc.autoclose_timer;
2289 		break;
2290 	default:
2291 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2292 		    __FUNCTION__, t_type);
2293 		break;
2294 	};
2295 	if (tmr == NULL) {
2296 		return;
2297 	}
2298 	if ((tmr->type != t_type) && tmr->type) {
2299 		/*
2300 		 * Ok we have a timer that is under joint use. Cookie timer
2301 		 * per chance with the SEND timer. We therefore are NOT
2302 		 * running the timer that the caller wants stopped.  So just
2303 		 * return.
2304 		 */
2305 		return;
2306 	}
2307 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2308 		stcb->asoc.num_send_timers_up--;
2309 		if (stcb->asoc.num_send_timers_up < 0) {
2310 			stcb->asoc.num_send_timers_up = 0;
2311 		}
2312 	}
2313 	tmr->self = NULL;
2314 	tmr->stopped_from = from;
2315 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2316 	return;
2317 }
2318 
2319 uint32_t
2320 sctp_calculate_len(struct mbuf *m)
2321 {
2322 	uint32_t tlen = 0;
2323 	struct mbuf *at;
2324 
2325 	at = m;
2326 	while (at) {
2327 		tlen += SCTP_BUF_LEN(at);
2328 		at = SCTP_BUF_NEXT(at);
2329 	}
2330 	return (tlen);
2331 }
2332 
2333 void
2334 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2335     struct sctp_association *asoc, uint32_t mtu)
2336 {
2337 	/*
2338 	 * Reset the P-MTU size on this association, this involves changing
2339 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2340 	 * allow the DF flag to be cleared.
2341 	 */
2342 	struct sctp_tmit_chunk *chk;
2343 	unsigned int eff_mtu, ovh;
2344 
2345 	asoc->smallest_mtu = mtu;
2346 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2347 		ovh = SCTP_MIN_OVERHEAD;
2348 	} else {
2349 		ovh = SCTP_MIN_V4_OVERHEAD;
2350 	}
2351 	eff_mtu = mtu - ovh;
2352 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2353 		if (chk->send_size > eff_mtu) {
2354 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2355 		}
2356 	}
2357 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2358 		if (chk->send_size > eff_mtu) {
2359 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2360 		}
2361 	}
2362 }
2363 
2364 
2365 /*
2366  * given an association and starting time of the current RTT period return
2367  * RTO in number of msecs net should point to the current network
2368  */
2369 
2370 uint32_t
2371 sctp_calculate_rto(struct sctp_tcb *stcb,
2372     struct sctp_association *asoc,
2373     struct sctp_nets *net,
2374     struct timeval *told,
2375     int safe, int rtt_from_sack)
2376 {
2377 	/*-
2378 	 * given an association and the starting time of the current RTT
2379 	 * period (in value1/value2) return RTO in number of msecs.
2380 	 */
2381 	int32_t rtt;		/* RTT in ms */
2382 	uint32_t new_rto;
2383 	int first_measure = 0;
2384 	struct timeval now, then, *old;
2385 
2386 	/* Copy it out for sparc64 */
2387 	if (safe == sctp_align_unsafe_makecopy) {
2388 		old = &then;
2389 		memcpy(&then, told, sizeof(struct timeval));
2390 	} else if (safe == sctp_align_safe_nocopy) {
2391 		old = told;
2392 	} else {
2393 		/* error */
2394 		SCTP_PRINTF("Huh, bad rto calc call\n");
2395 		return (0);
2396 	}
2397 	/************************/
2398 	/* 1. calculate new RTT */
2399 	/************************/
2400 	/* get the current time */
2401 	if (stcb->asoc.use_precise_time) {
2402 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2403 	} else {
2404 		(void)SCTP_GETTIME_TIMEVAL(&now);
2405 	}
2406 	timevalsub(&now, old);
2407 	/* store the current RTT in us */
2408 	net->rtt = (uint64_t) 10000000 *(uint64_t) now.tv_sec +
2409 	         (uint64_t) now.tv_usec;
2410 
2411 	/* computer rtt in ms */
2412 	rtt = net->rtt / 1000;
2413 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2414 		/*
2415 		 * Tell the CC module that a new update has just occurred
2416 		 * from a sack
2417 		 */
2418 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2419 	}
2420 	/*
2421 	 * Do we need to determine the lan? We do this only on sacks i.e.
2422 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2423 	 */
2424 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2425 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2426 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2427 			net->lan_type = SCTP_LAN_INTERNET;
2428 		} else {
2429 			net->lan_type = SCTP_LAN_LOCAL;
2430 		}
2431 	}
2432 	/***************************/
2433 	/* 2. update RTTVAR & SRTT */
2434 	/***************************/
2435 	/*-
2436 	 * Compute the scaled average lastsa and the
2437 	 * scaled variance lastsv as described in van Jacobson
2438 	 * Paper "Congestion Avoidance and Control", Annex A.
2439 	 *
2440 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2441 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2442 	 */
2443 	if (net->RTO_measured) {
2444 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2445 		net->lastsa += rtt;
2446 		if (rtt < 0) {
2447 			rtt = -rtt;
2448 		}
2449 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2450 		net->lastsv += rtt;
2451 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2452 			rto_logging(net, SCTP_LOG_RTTVAR);
2453 		}
2454 	} else {
2455 		/* First RTO measurment */
2456 		net->RTO_measured = 1;
2457 		first_measure = 1;
2458 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2459 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2460 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2461 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2462 		}
2463 	}
2464 	if (net->lastsv == 0) {
2465 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2466 	}
2467 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2468 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2469 	    (stcb->asoc.sat_network_lockout == 0)) {
2470 		stcb->asoc.sat_network = 1;
2471 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2472 		stcb->asoc.sat_network = 0;
2473 		stcb->asoc.sat_network_lockout = 1;
2474 	}
2475 	/* bound it, per C6/C7 in Section 5.3.1 */
2476 	if (new_rto < stcb->asoc.minrto) {
2477 		new_rto = stcb->asoc.minrto;
2478 	}
2479 	if (new_rto > stcb->asoc.maxrto) {
2480 		new_rto = stcb->asoc.maxrto;
2481 	}
2482 	/* we are now returning the RTO */
2483 	return (new_rto);
2484 }
2485 
2486 /*
2487  * return a pointer to a contiguous piece of data from the given mbuf chain
2488  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2489  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2490  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2491  */
2492 caddr_t
2493 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2494 {
2495 	uint32_t count;
2496 	uint8_t *ptr;
2497 
2498 	ptr = in_ptr;
2499 	if ((off < 0) || (len <= 0))
2500 		return (NULL);
2501 
2502 	/* find the desired start location */
2503 	while ((m != NULL) && (off > 0)) {
2504 		if (off < SCTP_BUF_LEN(m))
2505 			break;
2506 		off -= SCTP_BUF_LEN(m);
2507 		m = SCTP_BUF_NEXT(m);
2508 	}
2509 	if (m == NULL)
2510 		return (NULL);
2511 
2512 	/* is the current mbuf large enough (eg. contiguous)? */
2513 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2514 		return (mtod(m, caddr_t)+off);
2515 	} else {
2516 		/* else, it spans more than one mbuf, so save a temp copy... */
2517 		while ((m != NULL) && (len > 0)) {
2518 			count = min(SCTP_BUF_LEN(m) - off, len);
2519 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2520 			len -= count;
2521 			ptr += count;
2522 			off = 0;
2523 			m = SCTP_BUF_NEXT(m);
2524 		}
2525 		if ((m == NULL) && (len > 0))
2526 			return (NULL);
2527 		else
2528 			return ((caddr_t)in_ptr);
2529 	}
2530 }
2531 
2532 
2533 
2534 struct sctp_paramhdr *
2535 sctp_get_next_param(struct mbuf *m,
2536     int offset,
2537     struct sctp_paramhdr *pull,
2538     int pull_limit)
2539 {
2540 	/* This just provides a typed signature to Peter's Pull routine */
2541 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2542 	    (uint8_t *) pull));
2543 }
2544 
2545 
2546 int
2547 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2548 {
2549 	/*
2550 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2551 	 * padlen is > 3 this routine will fail.
2552 	 */
2553 	uint8_t *dp;
2554 	int i;
2555 
2556 	if (padlen > 3) {
2557 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2558 		return (ENOBUFS);
2559 	}
2560 	if (padlen <= M_TRAILINGSPACE(m)) {
2561 		/*
2562 		 * The easy way. We hope the majority of the time we hit
2563 		 * here :)
2564 		 */
2565 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2566 		SCTP_BUF_LEN(m) += padlen;
2567 	} else {
2568 		/* Hard way we must grow the mbuf */
2569 		struct mbuf *tmp;
2570 
2571 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2572 		if (tmp == NULL) {
2573 			/* Out of space GAK! we are in big trouble. */
2574 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2575 			return (ENOSPC);
2576 		}
2577 		/* setup and insert in middle */
2578 		SCTP_BUF_LEN(tmp) = padlen;
2579 		SCTP_BUF_NEXT(tmp) = NULL;
2580 		SCTP_BUF_NEXT(m) = tmp;
2581 		dp = mtod(tmp, uint8_t *);
2582 	}
2583 	/* zero out the pad */
2584 	for (i = 0; i < padlen; i++) {
2585 		*dp = 0;
2586 		dp++;
2587 	}
2588 	return (0);
2589 }
2590 
2591 int
2592 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2593 {
2594 	/* find the last mbuf in chain and pad it */
2595 	struct mbuf *m_at;
2596 
2597 	m_at = m;
2598 	if (last_mbuf) {
2599 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2600 	} else {
2601 		while (m_at) {
2602 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2603 				return (sctp_add_pad_tombuf(m_at, padval));
2604 			}
2605 			m_at = SCTP_BUF_NEXT(m_at);
2606 		}
2607 	}
2608 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2609 	return (EFAULT);
2610 }
2611 
/*
 * Queue an SCTP_ASSOC_CHANGE notification (state 'event', cause 'error')
 * on the association's socket read queue, if the user enabled association
 * events.  For TCP-model / connected-UDP sockets, a COMM_LOST or
 * CANT_STR_ASSOC additionally sets so_error (ECONNREFUSED while still in
 * COOKIE_WAIT, ECONNRESET otherwise) and wakes any blocked readers and
 * writers so they see the failure.  'data' is unused here.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		/* Failure before setup completed reads as "refused". */
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Drop the TCB lock before taking the socket lock
			 * (lock ordering); hold a refcount so the assoc
			 * cannot disappear while unlocked, and bail if the
			 * socket closed in the meantime.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		socantrcvmore(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Build the sctp_assoc_change record in the fresh mbuf. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Same lock-order/refcount dance as above. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2729 
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for address 'sa' with the
 * given state and error on the socket's read queue, if the user enabled
 * peer-address events.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy in the address, adjusting IPv6 link-local scope for the user. */
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
2806 
2807 
/*
 * Queue an SCTP_SEND_FAILED notification for a chunk that could not be
 * delivered, if the user enabled send-failed events.  The chunk's data
 * mbufs (with the SCTP data-chunk header trimmed off) are appended to the
 * notification and ownership is stolen from 'chk'.  'error' distinguishes
 * unsent vs. sent-but-failed data.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* Reported length covers the header plus user payload only. */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2889 
2890 
/*
 * Queue an SCTP_SEND_FAILED notification for a message still sitting on a
 * stream output queue (never made it into a data chunk), if the user
 * enabled send-failed events.  The pending data mbufs are appended to the
 * notification and ownership is stolen from 'sp'.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* No chunk header to subtract here: sp holds raw user data. */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* Part of the message was already taken into a chunk. */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
2963 
2964 
2965 
2966 static void
2967 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
2968     uint32_t error)
2969 {
2970 	struct mbuf *m_notify;
2971 	struct sctp_adaptation_event *sai;
2972 	struct sctp_queued_to_read *control;
2973 
2974 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
2975 		/* event not enabled */
2976 		return;
2977 	}
2978 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
2979 	if (m_notify == NULL)
2980 		/* no space left */
2981 		return;
2982 	SCTP_BUF_LEN(m_notify) = 0;
2983 	sai = mtod(m_notify, struct sctp_adaptation_event *);
2984 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
2985 	sai->sai_flags = 0;
2986 	sai->sai_length = sizeof(struct sctp_adaptation_event);
2987 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
2988 	sai->sai_assoc_id = sctp_get_associd(stcb);
2989 
2990 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
2991 	SCTP_BUF_NEXT(m_notify) = NULL;
2992 
2993 	/* append to socket */
2994 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2995 	    0, 0, 0, 0, 0, 0,
2996 	    m_notify);
2997 	if (control == NULL) {
2998 		/* no memory */
2999 		sctp_m_freem(m_notify);
3000 		return;
3001 	}
3002 	control->length = SCTP_BUF_LEN(m_notify);
3003 	control->spec_flags = M_NOTIFICATION;
3004 	/* not that we need this */
3005 	control->tail_mbuf = m_notify;
3006 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3007 	    control,
3008 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3009 }
3010 
3011 /* This always must be called with the read-queue LOCKED in the INP */
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT notification.  Unlike the other
 * notify helpers this one inserts the entry into the inp read queue by
 * hand (instead of sctp_add_to_readq()) so the event can be placed
 * directly behind the partially delivered message it refers to.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read from this socket any more */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	/* Fill in the sctp_pdapi_event body. */
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* val packs the stream number (high 16 bits) and sequence (low 16) */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	/*
	 * length is reset to 0 and then grown via atomic_add_int() below,
	 * mirroring what sctp_add_to_readq() would otherwise do.
	 */
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* charge the notification against the receive buffer accounting */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	/* place the event right behind the partially delivered message */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock-order dance: drop the TCB lock before taking
			 * the socket lock, holding a refcount so the stcb
			 * cannot go away in between.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3104 
/*
 * Handle a peer SHUTDOWN: for one-to-one style (and TCP-pool) sockets mark
 * the socket as unable to send and wake writers, then, if subscribed, queue
 * an SCTP_SHUTDOWN_EVENT notification to the receive buffer.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Lock-order dance: drop the TCB lock before taking the
		 * socket lock, holding a refcount to keep the stcb alive.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket was closed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* Fill in the sctp_shutdown_event body. */
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3172 
3173 static void
3174 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3175     int so_locked
3176 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3177     SCTP_UNUSED
3178 #endif
3179 )
3180 {
3181 	struct mbuf *m_notify;
3182 	struct sctp_sender_dry_event *event;
3183 	struct sctp_queued_to_read *control;
3184 
3185 	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3186 		/* event not enabled */
3187 		return;
3188 	}
3189 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3190 	if (m_notify == NULL) {
3191 		/* no space left */
3192 		return;
3193 	}
3194 	SCTP_BUF_LEN(m_notify) = 0;
3195 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3196 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3197 	event->sender_dry_flags = 0;
3198 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3199 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3200 
3201 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3202 	SCTP_BUF_NEXT(m_notify) = NULL;
3203 
3204 	/* append to socket */
3205 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3206 	    0, 0, 0, 0, 0, 0, m_notify);
3207 	if (control == NULL) {
3208 		/* no memory */
3209 		sctp_m_freem(m_notify);
3210 		return;
3211 	}
3212 	control->length = SCTP_BUF_LEN(m_notify);
3213 	control->spec_flags = M_NOTIFICATION;
3214 	/* not that we need this */
3215 	control->tail_mbuf = m_notify;
3216 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3217 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3218 }
3219 
3220 
3221 static void
3222 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3223 {
3224 	struct mbuf *m_notify;
3225 	struct sctp_queued_to_read *control;
3226 	struct sctp_stream_reset_event *strreset;
3227 	int len;
3228 
3229 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3230 		/* event not enabled */
3231 		return;
3232 	}
3233 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3234 	if (m_notify == NULL)
3235 		/* no space left */
3236 		return;
3237 	SCTP_BUF_LEN(m_notify) = 0;
3238 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3239 	if (len > M_TRAILINGSPACE(m_notify)) {
3240 		/* never enough room */
3241 		sctp_m_freem(m_notify);
3242 		return;
3243 	}
3244 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3245 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3246 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3247 	strreset->strreset_length = len;
3248 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3249 	strreset->strreset_list[0] = number_entries;
3250 
3251 	SCTP_BUF_LEN(m_notify) = len;
3252 	SCTP_BUF_NEXT(m_notify) = NULL;
3253 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3254 		/* no space */
3255 		sctp_m_freem(m_notify);
3256 		return;
3257 	}
3258 	/* append to socket */
3259 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3260 	    0, 0, 0, 0, 0, 0,
3261 	    m_notify);
3262 	if (control == NULL) {
3263 		/* no memory */
3264 		sctp_m_freem(m_notify);
3265 		return;
3266 	}
3267 	control->spec_flags = M_NOTIFICATION;
3268 	control->length = SCTP_BUF_LEN(m_notify);
3269 	/* not that we need this */
3270 	control->tail_mbuf = m_notify;
3271 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3272 	    control,
3273 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3274 }
3275 
3276 
3277 static void
3278 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3279     int number_entries, uint16_t * list, int flag)
3280 {
3281 	struct mbuf *m_notify;
3282 	struct sctp_queued_to_read *control;
3283 	struct sctp_stream_reset_event *strreset;
3284 	int len;
3285 
3286 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3287 		/* event not enabled */
3288 		return;
3289 	}
3290 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3291 	if (m_notify == NULL)
3292 		/* no space left */
3293 		return;
3294 	SCTP_BUF_LEN(m_notify) = 0;
3295 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3296 	if (len > M_TRAILINGSPACE(m_notify)) {
3297 		/* never enough room */
3298 		sctp_m_freem(m_notify);
3299 		return;
3300 	}
3301 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3302 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3303 	if (number_entries == 0) {
3304 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3305 	} else {
3306 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3307 	}
3308 	strreset->strreset_length = len;
3309 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3310 	if (number_entries) {
3311 		int i;
3312 
3313 		for (i = 0; i < number_entries; i++) {
3314 			strreset->strreset_list[i] = ntohs(list[i]);
3315 		}
3316 	}
3317 	SCTP_BUF_LEN(m_notify) = len;
3318 	SCTP_BUF_NEXT(m_notify) = NULL;
3319 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3320 		/* no space */
3321 		sctp_m_freem(m_notify);
3322 		return;
3323 	}
3324 	/* append to socket */
3325 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3326 	    0, 0, 0, 0, 0, 0,
3327 	    m_notify);
3328 	if (control == NULL) {
3329 		/* no memory */
3330 		sctp_m_freem(m_notify);
3331 		return;
3332 	}
3333 	control->spec_flags = M_NOTIFICATION;
3334 	control->length = SCTP_BUF_LEN(m_notify);
3335 	/* not that we need this */
3336 	control->tail_mbuf = m_notify;
3337 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3338 	    control,
3339 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3340 }
3341 
3342 
3343 void
3344 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3345     uint32_t error, void *data, int so_locked
3346 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3347     SCTP_UNUSED
3348 #endif
3349 )
3350 {
3351 	if ((stcb == NULL) ||
3352 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3353 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3354 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3355 		/* If the socket is gone we are out of here */
3356 		return;
3357 	}
3358 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3359 		return;
3360 	}
3361 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3362 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3363 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3364 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3365 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3366 			/* Don't report these in front states */
3367 			return;
3368 		}
3369 	}
3370 	switch (notification) {
3371 	case SCTP_NOTIFY_ASSOC_UP:
3372 		if (stcb->asoc.assoc_up_sent == 0) {
3373 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3374 			stcb->asoc.assoc_up_sent = 1;
3375 		}
3376 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3377 			sctp_notify_adaptation_layer(stcb, error);
3378 		}
3379 		if (stcb->asoc.peer_supports_auth == 0) {
3380 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3381 			    NULL, so_locked);
3382 		}
3383 		break;
3384 	case SCTP_NOTIFY_ASSOC_DOWN:
3385 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3386 		break;
3387 	case SCTP_NOTIFY_INTERFACE_DOWN:
3388 		{
3389 			struct sctp_nets *net;
3390 
3391 			net = (struct sctp_nets *)data;
3392 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3393 			    (struct sockaddr *)&net->ro._l_addr, error);
3394 			break;
3395 		}
3396 	case SCTP_NOTIFY_INTERFACE_UP:
3397 		{
3398 			struct sctp_nets *net;
3399 
3400 			net = (struct sctp_nets *)data;
3401 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3402 			    (struct sockaddr *)&net->ro._l_addr, error);
3403 			break;
3404 		}
3405 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3406 		{
3407 			struct sctp_nets *net;
3408 
3409 			net = (struct sctp_nets *)data;
3410 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3411 			    (struct sockaddr *)&net->ro._l_addr, error);
3412 			break;
3413 		}
3414 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3415 		sctp_notify_send_failed2(stcb, error,
3416 		    (struct sctp_stream_queue_pending *)data, so_locked);
3417 		break;
3418 	case SCTP_NOTIFY_DG_FAIL:
3419 		sctp_notify_send_failed(stcb, error,
3420 		    (struct sctp_tmit_chunk *)data, so_locked);
3421 		break;
3422 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3423 		{
3424 			uint32_t val;
3425 
3426 			val = *((uint32_t *) data);
3427 
3428 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3429 			break;
3430 		}
3431 	case SCTP_NOTIFY_STRDATA_ERR:
3432 		break;
3433 	case SCTP_NOTIFY_ASSOC_ABORTED:
3434 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3435 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3436 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3437 		} else {
3438 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3439 		}
3440 		break;
3441 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3442 		break;
3443 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3444 		break;
3445 	case SCTP_NOTIFY_ASSOC_RESTART:
3446 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3447 		if (stcb->asoc.peer_supports_auth == 0) {
3448 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3449 			    NULL, so_locked);
3450 		}
3451 		break;
3452 	case SCTP_NOTIFY_HB_RESP:
3453 		break;
3454 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3455 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3456 		break;
3457 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3458 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3459 		break;
3460 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3461 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3462 		break;
3463 
3464 	case SCTP_NOTIFY_STR_RESET_SEND:
3465 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3466 		break;
3467 	case SCTP_NOTIFY_STR_RESET_RECV:
3468 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3469 		break;
3470 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3471 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3472 		break;
3473 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3474 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3475 		break;
3476 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3477 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3478 		    error);
3479 		break;
3480 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3481 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3482 		    error);
3483 		break;
3484 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3485 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3486 		    error);
3487 		break;
3488 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3489 		break;
3490 	case SCTP_NOTIFY_ASCONF_FAILED:
3491 		break;
3492 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3493 		sctp_notify_shutdown_event(stcb);
3494 		break;
3495 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3496 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3497 		    (uint16_t) (uintptr_t) data,
3498 		    so_locked);
3499 		break;
3500 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3501 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3502 		    (uint16_t) (uintptr_t) data,
3503 		    so_locked);
3504 		break;
3505 	case SCTP_NOTIFY_NO_PEER_AUTH:
3506 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3507 		    (uint16_t) (uintptr_t) data,
3508 		    so_locked);
3509 		break;
3510 	case SCTP_NOTIFY_SENDER_DRY:
3511 		sctp_notify_sender_dry_event(stcb, so_locked);
3512 		break;
3513 	default:
3514 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3515 		    __FUNCTION__, notification, notification);
3516 		break;
3517 	}			/* end switch */
3518 }
3519 
/*
 * Drain every outbound queue of the association (sent queue, send queue
 * and each stream's pending queue), notifying the ULP of each failed
 * datagram and releasing the associated buffers.  Takes the TCB send
 * lock itself unless the caller already holds it (holds_lock != 0).
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket gone; nobody left to notify */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* report each chunk as a failed (but sent) datagram */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
			/* the notify may have consumed chk->data, re-check */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* these were never put on the wire */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
				/* the notify may have consumed sp->data, re-check */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3613 
/*
 * Notify the ULP that the association was aborted: fail all outbound
 * data and queue an ASSOC_ABORTED event.  No-op if the socket is gone.
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	/*
	 * For one-to-one style (or connected one-to-many) sockets, record
	 * that the association was aborted — presumably so later socket
	 * operations can report the failure; verify against the consumers
	 * of SCTP_PCB_FLAGS_WAS_ABORTED.
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket gone; nothing to report */
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, 1, so_locked);
	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
}
3638 
/*
 * Abort an association in response to an incoming packet: notify the ULP
 * (if we have a TCB), send an ABORT to the peer using the peer's vtag,
 * update statistics, and free the association.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: drop the TCB lock before taking the
		 * socket lock, holding a refcount to keep the stcb alive.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* only established associations count toward currestab */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3682 
3683 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the inbound and outbound TSN logs of an association.
 * NOTE(review): the inner guard is spelled NOSIY_PRINTS (not NOISY_PRINTS),
 * so this body compiles to a no-op unless that exact macro is defined —
 * looks like a long-standing typo; confirm before "fixing", since renaming
 * it would change which builds emit this output.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* if the circular log wrapped, print the older half first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* same wrapped-first ordering for the outbound log */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3744 
3745 #endif
3746 
/*
 * Locally abort an association: mark it aborted, notify the ULP, send an
 * ABORT chunk (with optional cause op_err) to the peer, update statistics
 * and free the TCB.  With a NULL stcb it only finishes tearing down an
 * endpoint that is already marked SOCKET_GONE.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last association gone; free the endpoint */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* only established associations count toward currestab */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: drop the TCB lock before taking the socket
	 * lock, holding a refcount to keep the stcb alive in between.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3809 
3810 void
3811 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3812     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3813 {
3814 	struct sctp_chunkhdr *ch, chunk_buf;
3815 	unsigned int chk_length;
3816 
3817 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3818 	/* Generate a TO address for future reference */
3819 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3820 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3821 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3822 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3823 		}
3824 	}
3825 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3826 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3827 	while (ch != NULL) {
3828 		chk_length = ntohs(ch->chunk_length);
3829 		if (chk_length < sizeof(*ch)) {
3830 			/* break to abort land */
3831 			break;
3832 		}
3833 		switch (ch->chunk_type) {
3834 		case SCTP_COOKIE_ECHO:
3835 			/* We hit here only if the assoc is being freed */
3836 			return;
3837 		case SCTP_PACKET_DROPPED:
3838 			/* we don't respond to pkt-dropped */
3839 			return;
3840 		case SCTP_ABORT_ASSOCIATION:
3841 			/* we don't respond with an ABORT to an ABORT */
3842 			return;
3843 		case SCTP_SHUTDOWN_COMPLETE:
3844 			/*
3845 			 * we ignore it since we are not waiting for it and
3846 			 * peer is gone
3847 			 */
3848 			return;
3849 		case SCTP_SHUTDOWN_ACK:
3850 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
3851 			return;
3852 		default:
3853 			break;
3854 		}
3855 		offset += SCTP_SIZE32(chk_length);
3856 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3857 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3858 	}
3859 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
3860 }
3861 
3862 /*
3863  * check the inbound datagram to make sure there is not an abort inside it,
3864  * if there is return 1, else return 0.
3865  */
3866 int
3867 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3868 {
3869 	struct sctp_chunkhdr *ch;
3870 	struct sctp_init_chunk *init_chk, chunk_buf;
3871 	int offset;
3872 	unsigned int chk_length;
3873 
3874 	offset = iphlen + sizeof(struct sctphdr);
3875 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3876 	    (uint8_t *) & chunk_buf);
3877 	while (ch != NULL) {
3878 		chk_length = ntohs(ch->chunk_length);
3879 		if (chk_length < sizeof(*ch)) {
3880 			/* packet is probably corrupt */
3881 			break;
3882 		}
3883 		/* we seem to be ok, is it an abort? */
3884 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
3885 			/* yep, tell them */
3886 			return (1);
3887 		}
3888 		if (ch->chunk_type == SCTP_INITIATION) {
3889 			/* need to update the Vtag */
3890 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
3891 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
3892 			if (init_chk != NULL) {
3893 				*vtagfill = ntohl(init_chk->init.initiate_tag);
3894 			}
3895 		}
3896 		/* Nope, move to the next chunk */
3897 		offset += SCTP_SIZE32(chk_length);
3898 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3899 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3900 	}
3901 	return (0);
3902 }
3903 
3904 /*
3905  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
3906  * set (i.e. it's 0) so, create this function to compare link local scopes
3907  */
3908 #ifdef INET6
3909 uint32_t
3910 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
3911 {
3912 	struct sockaddr_in6 a, b;
3913 
3914 	/* save copies */
3915 	a = *addr1;
3916 	b = *addr2;
3917 
3918 	if (a.sin6_scope_id == 0)
3919 		if (sa6_recoverscope(&a)) {
3920 			/* can't get scope, so can't match */
3921 			return (0);
3922 		}
3923 	if (b.sin6_scope_id == 0)
3924 		if (sa6_recoverscope(&b)) {
3925 			/* can't get scope, so can't match */
3926 			return (0);
3927 		}
3928 	if (a.sin6_scope_id != b.sin6_scope_id)
3929 		return (0);
3930 
3931 	return (1);
3932 }
3933 
3934 /*
3935  * returns a sockaddr_in6 with embedded scope recovered and removed
3936  */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				/*
				 * No explicit scope id set: recover the
				 * embedded scope into the caller-supplied
				 * scratch sockaddr and return that instead.
				 */
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				/*
				 * NOTE(review): this clears the embedded
				 * scope in the CALLER'S sockaddr in place,
				 * not in a copy -- confirm callers expect
				 * their address to be modified here.
				 */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
3957 
3958 #endif
3959 
3960 /*
3961  * are the two addresses the same?  currently a "scopeless" check returns: 1
3962  * if same, 0 if not
3963  */
3964 int
3965 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
3966 {
3967 
3968 	/* must be valid */
3969 	if (sa1 == NULL || sa2 == NULL)
3970 		return (0);
3971 
3972 	/* must be the same family */
3973 	if (sa1->sa_family != sa2->sa_family)
3974 		return (0);
3975 
3976 	switch (sa1->sa_family) {
3977 #ifdef INET6
3978 	case AF_INET6:
3979 		{
3980 			/* IPv6 addresses */
3981 			struct sockaddr_in6 *sin6_1, *sin6_2;
3982 
3983 			sin6_1 = (struct sockaddr_in6 *)sa1;
3984 			sin6_2 = (struct sockaddr_in6 *)sa2;
3985 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
3986 			    sin6_2));
3987 		}
3988 #endif
3989 #ifdef INET
3990 	case AF_INET:
3991 		{
3992 			/* IPv4 addresses */
3993 			struct sockaddr_in *sin_1, *sin_2;
3994 
3995 			sin_1 = (struct sockaddr_in *)sa1;
3996 			sin_2 = (struct sockaddr_in *)sa2;
3997 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
3998 		}
3999 #endif
4000 	default:
4001 		/* we don't do these... */
4002 		return (0);
4003 	}
4004 }
4005 
4006 void
4007 sctp_print_address(struct sockaddr *sa)
4008 {
4009 #ifdef INET6
4010 	char ip6buf[INET6_ADDRSTRLEN];
4011 
4012 	ip6buf[0] = 0;
4013 #endif
4014 
4015 	switch (sa->sa_family) {
4016 #ifdef INET6
4017 	case AF_INET6:
4018 		{
4019 			struct sockaddr_in6 *sin6;
4020 
4021 			sin6 = (struct sockaddr_in6 *)sa;
4022 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4023 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4024 			    ntohs(sin6->sin6_port),
4025 			    sin6->sin6_scope_id);
4026 			break;
4027 		}
4028 #endif
4029 #ifdef INET
4030 	case AF_INET:
4031 		{
4032 			struct sockaddr_in *sin;
4033 			unsigned char *p;
4034 
4035 			sin = (struct sockaddr_in *)sa;
4036 			p = (unsigned char *)&sin->sin_addr;
4037 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4038 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4039 			break;
4040 		}
4041 #endif
4042 	default:
4043 		SCTP_PRINTF("?\n");
4044 		break;
4045 	}
4046 }
4047 
4048 void
4049 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4050 {
4051 	switch (iph->ip_v) {
4052 #ifdef INET
4053 	case IPVERSION:
4054 		{
4055 			struct sockaddr_in lsa, fsa;
4056 
4057 			bzero(&lsa, sizeof(lsa));
4058 			lsa.sin_len = sizeof(lsa);
4059 			lsa.sin_family = AF_INET;
4060 			lsa.sin_addr = iph->ip_src;
4061 			lsa.sin_port = sh->src_port;
4062 			bzero(&fsa, sizeof(fsa));
4063 			fsa.sin_len = sizeof(fsa);
4064 			fsa.sin_family = AF_INET;
4065 			fsa.sin_addr = iph->ip_dst;
4066 			fsa.sin_port = sh->dest_port;
4067 			SCTP_PRINTF("src: ");
4068 			sctp_print_address((struct sockaddr *)&lsa);
4069 			SCTP_PRINTF("dest: ");
4070 			sctp_print_address((struct sockaddr *)&fsa);
4071 			break;
4072 		}
4073 #endif
4074 #ifdef INET6
4075 	case IPV6_VERSION >> 4:
4076 		{
4077 			struct ip6_hdr *ip6;
4078 			struct sockaddr_in6 lsa6, fsa6;
4079 
4080 			ip6 = (struct ip6_hdr *)iph;
4081 			bzero(&lsa6, sizeof(lsa6));
4082 			lsa6.sin6_len = sizeof(lsa6);
4083 			lsa6.sin6_family = AF_INET6;
4084 			lsa6.sin6_addr = ip6->ip6_src;
4085 			lsa6.sin6_port = sh->src_port;
4086 			bzero(&fsa6, sizeof(fsa6));
4087 			fsa6.sin6_len = sizeof(fsa6);
4088 			fsa6.sin6_family = AF_INET6;
4089 			fsa6.sin6_addr = ip6->ip6_dst;
4090 			fsa6.sin6_port = sh->dest_port;
4091 			SCTP_PRINTF("src: ");
4092 			sctp_print_address((struct sockaddr *)&lsa6);
4093 			SCTP_PRINTF("dest: ");
4094 			sctp_print_address((struct sockaddr *)&fsa6);
4095 			break;
4096 		}
4097 #endif
4098 	default:
4099 		/* TSNH */
4100 		break;
4101 	}
4102 }
4103 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;	/* staging queue between the two inps */
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* serialize against concurrent readers of the old receive buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	/* Phase 1: unhook matching controls and debit the old sockbuf */
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* uncharge each data mbuf from the old socket buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	/* Phase 2: append staged controls and charge the new sockbuf */
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4179 
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* socket can no longer be read from: drop the message */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* count only user data messages, not notifications */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* prune zero-length mbufs, charge the sockbuf, recompute length */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* NOTE(review): inp was checked non-NULL above; this re-check is redundant */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/* hold a ref across the unlock/lock dance */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4301 
4302 
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 *
	 * Returns 0 on success, -1 when the append cannot be done (no
	 * control, empty chain, or control already complete).
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common error exit: releases the read lock if we took it */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		SCTP_INP_READ_UNLOCK(inp);
		return 0;
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* prune zero-length mbufs and charge the sockbuf (if given) */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/* hold a ref across the unlock/lock dance */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4448 
4449 
4450 
4451 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4452  *************ALTERNATE ROUTING CODE
4453  */
4454 
4455 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4456  *************ALTERNATE ROUTING CODE
4457  */
4458 
4459 struct mbuf *
4460 sctp_generate_invmanparam(int err)
4461 {
4462 	/* Return a MBUF with a invalid mandatory parameter */
4463 	struct mbuf *m;
4464 
4465 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4466 	if (m) {
4467 		struct sctp_paramhdr *ph;
4468 
4469 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4470 		ph = mtod(m, struct sctp_paramhdr *);
4471 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4472 		ph->param_type = htons(err);
4473 	}
4474 	return (m);
4475 }
4476 
4477 #ifdef SCTP_MBCNT_LOGGING
4478 void
4479 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4480     struct sctp_tmit_chunk *tp1, int chk_cnt)
4481 {
4482 	if (tp1->data == NULL) {
4483 		return;
4484 	}
4485 	asoc->chunks_on_out_queue -= chk_cnt;
4486 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4487 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4488 		    asoc->total_output_queue_size,
4489 		    tp1->book_size,
4490 		    0,
4491 		    tp1->mbcnt);
4492 	}
4493 	if (asoc->total_output_queue_size >= tp1->book_size) {
4494 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4495 	} else {
4496 		asoc->total_output_queue_size = 0;
4497 	}
4498 
4499 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4500 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4501 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4502 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4503 		} else {
4504 			stcb->sctp_socket->so_snd.sb_cc = 0;
4505 
4506 		}
4507 	}
4508 }
4509 
4510 #endif
4511 
4512 int
4513 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4514     int reason, int so_locked
4515 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4516     SCTP_UNUSED
4517 #endif
4518 )
4519 {
4520 	struct sctp_stream_out *strq;
4521 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4522 	struct sctp_stream_queue_pending *sp;
4523 	uint16_t stream = 0, seq = 0;
4524 	uint8_t foundeom = 0;
4525 	int ret_sz = 0;
4526 	int notdone;
4527 	int do_wakeup_routine = 0;
4528 
4529 	stream = tp1->rec.data.stream_number;
4530 	seq = tp1->rec.data.stream_seq;
4531 	do {
4532 		ret_sz += tp1->book_size;
4533 		if (tp1->data != NULL) {
4534 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4535 				sctp_flight_size_decrease(tp1);
4536 				sctp_total_flight_decrease(stcb, tp1);
4537 			}
4538 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4539 			stcb->asoc.peers_rwnd += tp1->send_size;
4540 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4541 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4542 			if (tp1->data) {
4543 				sctp_m_freem(tp1->data);
4544 				tp1->data = NULL;
4545 			}
4546 			do_wakeup_routine = 1;
4547 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4548 				stcb->asoc.sent_queue_cnt_removeable--;
4549 			}
4550 		}
4551 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4552 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4553 		    SCTP_DATA_NOT_FRAG) {
4554 			/* not frag'ed we ae done   */
4555 			notdone = 0;
4556 			foundeom = 1;
4557 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4558 			/* end of frag, we are done */
4559 			notdone = 0;
4560 			foundeom = 1;
4561 		} else {
4562 			/*
4563 			 * Its a begin or middle piece, we must mark all of
4564 			 * it
4565 			 */
4566 			notdone = 1;
4567 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4568 		}
4569 	} while (tp1 && notdone);
4570 	if (foundeom == 0) {
4571 		/*
4572 		 * The multi-part message was scattered across the send and
4573 		 * sent queue.
4574 		 */
4575 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4576 			if ((tp1->rec.data.stream_number != stream) ||
4577 			    (tp1->rec.data.stream_seq != seq)) {
4578 				break;
4579 			}
4580 			/*
4581 			 * save to chk in case we have some on stream out
4582 			 * queue. If so and we have an un-transmitted one we
4583 			 * don't have to fudge the TSN.
4584 			 */
4585 			chk = tp1;
4586 			ret_sz += tp1->book_size;
4587 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4588 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4589 			if (tp1->data) {
4590 				sctp_m_freem(tp1->data);
4591 				tp1->data = NULL;
4592 			}
4593 			/* No flight involved here book the size to 0 */
4594 			tp1->book_size = 0;
4595 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4596 				foundeom = 1;
4597 			}
4598 			do_wakeup_routine = 1;
4599 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4600 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4601 			/*
4602 			 * on to the sent queue so we can wait for it to be
4603 			 * passed by.
4604 			 */
4605 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4606 			    sctp_next);
4607 			stcb->asoc.send_queue_cnt--;
4608 			stcb->asoc.sent_queue_cnt++;
4609 		}
4610 	}
4611 	if (foundeom == 0) {
4612 		/*
4613 		 * Still no eom found. That means there is stuff left on the
4614 		 * stream out queue.. yuck.
4615 		 */
4616 		strq = &stcb->asoc.strmout[stream];
4617 		SCTP_TCB_SEND_LOCK(stcb);
4618 		TAILQ_FOREACH(sp, &strq->outqueue, next) {
4619 			/* FIXME: Shouldn't this be a serial number check? */
4620 			if (sp->strseq > seq) {
4621 				break;
4622 			}
4623 			/* Check if its our SEQ */
4624 			if (sp->strseq == seq) {
4625 				sp->discard_rest = 1;
4626 				/*
4627 				 * We may need to put a chunk on the queue
4628 				 * that holds the TSN that would have been
4629 				 * sent with the LAST bit.
4630 				 */
4631 				if (chk == NULL) {
4632 					/* Yep, we have to */
4633 					sctp_alloc_a_chunk(stcb, chk);
4634 					if (chk == NULL) {
4635 						/*
4636 						 * we are hosed. All we can
4637 						 * do is nothing.. which
4638 						 * will cause an abort if
4639 						 * the peer is paying
4640 						 * attention.
4641 						 */
4642 						goto oh_well;
4643 					}
4644 					memset(chk, 0, sizeof(*chk));
4645 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4646 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4647 					chk->asoc = &stcb->asoc;
4648 					chk->rec.data.stream_seq = sp->strseq;
4649 					chk->rec.data.stream_number = sp->stream;
4650 					chk->rec.data.payloadtype = sp->ppid;
4651 					chk->rec.data.context = sp->context;
4652 					chk->flags = sp->act_flags;
4653 					if (sp->net)
4654 						chk->whoTo = sp->net;
4655 					else
4656 						chk->whoTo = stcb->asoc.primary_destination;
4657 					atomic_add_int(&chk->whoTo->ref_count, 1);
4658 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4659 					stcb->asoc.pr_sctp_cnt++;
4660 					chk->pr_sctp_on = 1;
4661 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4662 					stcb->asoc.sent_queue_cnt++;
4663 					stcb->asoc.pr_sctp_cnt++;
4664 				} else {
4665 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4666 				}
4667 		oh_well:
4668 				if (sp->data) {
4669 					/*
4670 					 * Pull any data to free up the SB
4671 					 * and allow sender to "add more"
4672 					 * whilc we will throw away :-)
4673 					 */
4674 					sctp_free_spbufspace(stcb, &stcb->asoc,
4675 					    sp);
4676 					ret_sz += sp->length;
4677 					do_wakeup_routine = 1;
4678 					sp->some_taken = 1;
4679 					sctp_m_freem(sp->data);
4680 					sp->length = 0;
4681 					sp->data = NULL;
4682 					sp->tail_mbuf = NULL;
4683 				}
4684 				break;
4685 			}
4686 		}		/* End tailq_foreach */
4687 		SCTP_TCB_SEND_UNLOCK(stcb);
4688 	}
4689 	if (do_wakeup_routine) {
4690 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4691 		struct socket *so;
4692 
4693 		so = SCTP_INP_SO(stcb->sctp_ep);
4694 		if (!so_locked) {
4695 			atomic_add_int(&stcb->asoc.refcnt, 1);
4696 			SCTP_TCB_UNLOCK(stcb);
4697 			SCTP_SOCKET_LOCK(so, 1);
4698 			SCTP_TCB_LOCK(stcb);
4699 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4700 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4701 				/* assoc was freed while we were unlocked */
4702 				SCTP_SOCKET_UNLOCK(so, 1);
4703 				return (ret_sz);
4704 			}
4705 		}
4706 #endif
4707 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4708 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4709 		if (!so_locked) {
4710 			SCTP_SOCKET_UNLOCK(so, 1);
4711 		}
4712 #endif
4713 	}
4714 	return (ret_sz);
4715 }
4716 
4717 /*
4718  * checks to see if the given address, sa, is one that is currently known by
4719  * the kernel note: can't distinguish the same address on multiple interfaces
4720  * and doesn't handle multiple addresses with different zone/scope id's note:
4721  * ifa_ifwithaddr() compares the entire sockaddr struct
4722  */
4723 struct sctp_ifa *
4724 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4725     int holds_lock)
4726 {
4727 	struct sctp_laddr *laddr;
4728 
4729 	if (holds_lock == 0) {
4730 		SCTP_INP_RLOCK(inp);
4731 	}
4732 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4733 		if (laddr->ifa == NULL)
4734 			continue;
4735 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4736 			continue;
4737 #ifdef INET
4738 		if (addr->sa_family == AF_INET) {
4739 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4740 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4741 				/* found him. */
4742 				if (holds_lock == 0) {
4743 					SCTP_INP_RUNLOCK(inp);
4744 				}
4745 				return (laddr->ifa);
4746 				break;
4747 			}
4748 		}
4749 #endif
4750 #ifdef INET6
4751 		if (addr->sa_family == AF_INET6) {
4752 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4753 			    &laddr->ifa->address.sin6)) {
4754 				/* found him. */
4755 				if (holds_lock == 0) {
4756 					SCTP_INP_RUNLOCK(inp);
4757 				}
4758 				return (laddr->ifa);
4759 				break;
4760 			}
4761 		}
4762 #endif
4763 	}
4764 	if (holds_lock == 0) {
4765 		SCTP_INP_RUNLOCK(inp);
4766 	}
4767 	return (NULL);
4768 }
4769 
4770 uint32_t
4771 sctp_get_ifa_hash_val(struct sockaddr *addr)
4772 {
4773 	switch (addr->sa_family) {
4774 #ifdef INET
4775 	case AF_INET:
4776 		{
4777 			struct sockaddr_in *sin;
4778 
4779 			sin = (struct sockaddr_in *)addr;
4780 			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4781 		}
4782 #endif
4783 #ifdef INET6
4784 	case INET6:
4785 		{
4786 			struct sockaddr_in6 *sin6;
4787 			uint32_t hash_of_addr;
4788 
4789 			sin6 = (struct sockaddr_in6 *)addr;
4790 			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4791 			    sin6->sin6_addr.s6_addr32[1] +
4792 			    sin6->sin6_addr.s6_addr32[2] +
4793 			    sin6->sin6_addr.s6_addr32[3]);
4794 			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4795 			return (hash_of_addr);
4796 		}
4797 #endif
4798 	default:
4799 		break;
4800 	}
4801 	return (0);
4802 }
4803 
struct sctp_ifa *
sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
{
	/*
	 * Look up addr in the per-VRF address hash table.  Returns the
	 * matching sctp_ifa or NULL.  Takes the global address read lock
	 * unless the caller already holds_lock.
	 */
	struct sctp_ifa *sctp_ifap;
	struct sctp_vrf *vrf;
	struct sctp_ifalist *hash_head;
	uint32_t hash_of_addr;

	if (holds_lock == 0)
		SCTP_IPI_ADDR_RLOCK();

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
stage_right:
		/* common failure exit: drop the lock if we took it */
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();
		return (NULL);
	}
	hash_of_addr = sctp_get_ifa_hash_val(addr);

	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
	/*
	 * NOTE(review): hash_head is the address of an array element, so
	 * this NULL check only fires for bucket 0 of a NULL table --
	 * effectively a corruption diagnostic, not a normal path.
	 */
	if (hash_head == NULL) {
		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
		sctp_print_address(addr);
		SCTP_PRINTF("No such bucket for address\n");
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();

		return (NULL);
	}
	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
		/*
		 * NOTE(review): LIST_FOREACH terminates on NULL, so this
		 * check can only trip on list corruption.
		 */
		if (sctp_ifap == NULL) {
#ifdef INVARIANTS
			panic("Huh LIST_FOREACH corrupt");
			goto stage_right;
#else
			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
			goto stage_right;
#endif
		}
		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    sctp_ifap->address.sin.sin_addr.s_addr) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &sctp_ifap->address.sin6)) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				break;
			}
		}
#endif
	}
	if (holds_lock == 0)
		SCTP_IPI_ADDR_RUNLOCK();
	return (NULL);
}
4877 
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* hold a reference so the association cannot vanish under us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* accumulate what the caller freed since the last update */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window opened by at least rwnd_req: send a window update */
		if (hold_rlock) {
			/* drop the INP read lock before taking the TCB lock */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	if (so && r_unlocked && hold_rlock) {
		/* re-acquire the read lock the caller expects to hold */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
4959 
4960 int
4961 sctp_sorecvmsg(struct socket *so,
4962     struct uio *uio,
4963     struct mbuf **mp,
4964     struct sockaddr *from,
4965     int fromlen,
4966     int *msg_flags,
4967     struct sctp_sndrcvinfo *sinfo,
4968     int filling_sinfo)
4969 {
4970 	/*
4971 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
4972 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
4973 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
4974 	 * On the way out we may send out any combination of:
4975 	 * MSG_NOTIFICATION MSG_EOR
4976 	 *
4977 	 */
4978 	struct sctp_inpcb *inp = NULL;
4979 	int my_len = 0;
4980 	int cp_len = 0, error = 0;
4981 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
4982 	struct mbuf *m = NULL;
4983 	struct sctp_tcb *stcb = NULL;
4984 	int wakeup_read_socket = 0;
4985 	int freecnt_applied = 0;
4986 	int out_flags = 0, in_flags = 0;
4987 	int block_allowed = 1;
4988 	uint32_t freed_so_far = 0;
4989 	uint32_t copied_so_far = 0;
4990 	int in_eeor_mode = 0;
4991 	int no_rcv_needed = 0;
4992 	uint32_t rwnd_req = 0;
4993 	int hold_sblock = 0;
4994 	int hold_rlock = 0;
4995 	int slen = 0;
4996 	uint32_t held_length = 0;
4997 	int sockbuf_lock = 0;
4998 
4999 	if (uio == NULL) {
5000 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5001 		return (EINVAL);
5002 	}
5003 	if (msg_flags) {
5004 		in_flags = *msg_flags;
5005 		if (in_flags & MSG_PEEK)
5006 			SCTP_STAT_INCR(sctps_read_peeks);
5007 	} else {
5008 		in_flags = 0;
5009 	}
5010 	slen = uio->uio_resid;
5011 
5012 	/* Pull in and set up our int flags */
5013 	if (in_flags & MSG_OOB) {
5014 		/* Out of band's NOT supported */
5015 		return (EOPNOTSUPP);
5016 	}
5017 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5018 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5019 		return (EINVAL);
5020 	}
5021 	if ((in_flags & (MSG_DONTWAIT
5022 	    | MSG_NBIO
5023 	    )) ||
5024 	    SCTP_SO_IS_NBIO(so)) {
5025 		block_allowed = 0;
5026 	}
5027 	/* setup the endpoint */
5028 	inp = (struct sctp_inpcb *)so->so_pcb;
5029 	if (inp == NULL) {
5030 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5031 		return (EFAULT);
5032 	}
5033 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5034 	/* Must be at least a MTU's worth */
5035 	if (rwnd_req < SCTP_MIN_RWND)
5036 		rwnd_req = SCTP_MIN_RWND;
5037 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5038 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5039 		sctp_misc_ints(SCTP_SORECV_ENTER,
5040 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5041 	}
5042 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5043 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5044 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5045 	}
5046 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5047 	sockbuf_lock = 1;
5048 	if (error) {
5049 		goto release_unlocked;
5050 	}
5051 restart:
5052 
5053 
5054 restart_nosblocks:
5055 	if (hold_sblock == 0) {
5056 		SOCKBUF_LOCK(&so->so_rcv);
5057 		hold_sblock = 1;
5058 	}
5059 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5060 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5061 		goto out;
5062 	}
5063 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5064 		if (so->so_error) {
5065 			error = so->so_error;
5066 			if ((in_flags & MSG_PEEK) == 0)
5067 				so->so_error = 0;
5068 			goto out;
5069 		} else {
5070 			if (so->so_rcv.sb_cc == 0) {
5071 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5072 				/* indicate EOF */
5073 				error = 0;
5074 				goto out;
5075 			}
5076 		}
5077 	}
5078 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5079 		/* we need to wait for data */
5080 		if ((so->so_rcv.sb_cc == 0) &&
5081 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5082 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5083 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5084 				/*
5085 				 * For active open side clear flags for
5086 				 * re-use passive open is blocked by
5087 				 * connect.
5088 				 */
5089 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5090 					/*
5091 					 * You were aborted, passive side
5092 					 * always hits here
5093 					 */
5094 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5095 					error = ECONNRESET;
5096 				}
5097 				so->so_state &= ~(SS_ISCONNECTING |
5098 				    SS_ISDISCONNECTING |
5099 				    SS_ISCONFIRMING |
5100 				    SS_ISCONNECTED);
5101 				if (error == 0) {
5102 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5103 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5104 						error = ENOTCONN;
5105 					}
5106 				}
5107 				goto out;
5108 			}
5109 		}
5110 		error = sbwait(&so->so_rcv);
5111 		if (error) {
5112 			goto out;
5113 		}
5114 		held_length = 0;
5115 		goto restart_nosblocks;
5116 	} else if (so->so_rcv.sb_cc == 0) {
5117 		if (so->so_error) {
5118 			error = so->so_error;
5119 			if ((in_flags & MSG_PEEK) == 0)
5120 				so->so_error = 0;
5121 		} else {
5122 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5123 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5124 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5125 					/*
5126 					 * For active open side clear flags
5127 					 * for re-use passive open is
5128 					 * blocked by connect.
5129 					 */
5130 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5131 						/*
5132 						 * You were aborted, passive
5133 						 * side always hits here
5134 						 */
5135 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5136 						error = ECONNRESET;
5137 					}
5138 					so->so_state &= ~(SS_ISCONNECTING |
5139 					    SS_ISDISCONNECTING |
5140 					    SS_ISCONFIRMING |
5141 					    SS_ISCONNECTED);
5142 					if (error == 0) {
5143 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5144 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5145 							error = ENOTCONN;
5146 						}
5147 					}
5148 					goto out;
5149 				}
5150 			}
5151 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5152 			error = EWOULDBLOCK;
5153 		}
5154 		goto out;
5155 	}
5156 	if (hold_sblock == 1) {
5157 		SOCKBUF_UNLOCK(&so->so_rcv);
5158 		hold_sblock = 0;
5159 	}
5160 	/* we possibly have data we can read */
5161 	/* sa_ignore FREED_MEMORY */
5162 	control = TAILQ_FIRST(&inp->read_queue);
5163 	if (control == NULL) {
5164 		/*
5165 		 * This could be happening since the appender did the
5166 		 * increment but as not yet did the tailq insert onto the
5167 		 * read_queue
5168 		 */
5169 		if (hold_rlock == 0) {
5170 			SCTP_INP_READ_LOCK(inp);
5171 			hold_rlock = 1;
5172 		}
5173 		control = TAILQ_FIRST(&inp->read_queue);
5174 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5175 #ifdef INVARIANTS
5176 			panic("Huh, its non zero and nothing on control?");
5177 #endif
5178 			so->so_rcv.sb_cc = 0;
5179 		}
5180 		SCTP_INP_READ_UNLOCK(inp);
5181 		hold_rlock = 0;
5182 		goto restart;
5183 	}
5184 	if ((control->length == 0) &&
5185 	    (control->do_not_ref_stcb)) {
5186 		/*
5187 		 * Clean up code for freeing assoc that left behind a
5188 		 * pdapi.. maybe a peer in EEOR that just closed after
5189 		 * sending and never indicated a EOR.
5190 		 */
5191 		if (hold_rlock == 0) {
5192 			hold_rlock = 1;
5193 			SCTP_INP_READ_LOCK(inp);
5194 		}
5195 		control->held_length = 0;
5196 		if (control->data) {
5197 			/* Hmm there is data here .. fix */
5198 			struct mbuf *m_tmp;
5199 			int cnt = 0;
5200 
5201 			m_tmp = control->data;
5202 			while (m_tmp) {
5203 				cnt += SCTP_BUF_LEN(m_tmp);
5204 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5205 					control->tail_mbuf = m_tmp;
5206 					control->end_added = 1;
5207 				}
5208 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5209 			}
5210 			control->length = cnt;
5211 		} else {
5212 			/* remove it */
5213 			TAILQ_REMOVE(&inp->read_queue, control, next);
5214 			/* Add back any hiddend data */
5215 			sctp_free_remote_addr(control->whoFrom);
5216 			sctp_free_a_readq(stcb, control);
5217 		}
5218 		if (hold_rlock) {
5219 			hold_rlock = 0;
5220 			SCTP_INP_READ_UNLOCK(inp);
5221 		}
5222 		goto restart;
5223 	}
5224 	if ((control->length == 0) &&
5225 	    (control->end_added == 1)) {
5226 		/*
5227 		 * Do we also need to check for (control->pdapi_aborted ==
5228 		 * 1)?
5229 		 */
5230 		if (hold_rlock == 0) {
5231 			hold_rlock = 1;
5232 			SCTP_INP_READ_LOCK(inp);
5233 		}
5234 		TAILQ_REMOVE(&inp->read_queue, control, next);
5235 		if (control->data) {
5236 #ifdef INVARIANTS
5237 			panic("control->data not null but control->length == 0");
5238 #else
5239 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5240 			sctp_m_freem(control->data);
5241 			control->data = NULL;
5242 #endif
5243 		}
5244 		if (control->aux_data) {
5245 			sctp_m_free(control->aux_data);
5246 			control->aux_data = NULL;
5247 		}
5248 		sctp_free_remote_addr(control->whoFrom);
5249 		sctp_free_a_readq(stcb, control);
5250 		if (hold_rlock) {
5251 			hold_rlock = 0;
5252 			SCTP_INP_READ_UNLOCK(inp);
5253 		}
5254 		goto restart;
5255 	}
5256 	if (control->length == 0) {
5257 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5258 		    (filling_sinfo)) {
5259 			/* find a more suitable one then this */
5260 			ctl = TAILQ_NEXT(control, next);
5261 			while (ctl) {
5262 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5263 				    (ctl->some_taken ||
5264 				    (ctl->spec_flags & M_NOTIFICATION) ||
5265 				    ((ctl->do_not_ref_stcb == 0) &&
5266 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5267 				    ) {
5268 					/*-
5269 					 * If we have a different TCB next, and there is data
5270 					 * present. If we have already taken some (pdapi), OR we can
5271 					 * ref the tcb and no delivery as started on this stream, we
5272 					 * take it. Note we allow a notification on a different
5273 					 * assoc to be delivered..
5274 					 */
5275 					control = ctl;
5276 					goto found_one;
5277 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5278 					    (ctl->length) &&
5279 					    ((ctl->some_taken) ||
5280 					    ((ctl->do_not_ref_stcb == 0) &&
5281 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5282 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5283 					/*-
5284 					 * If we have the same tcb, and there is data present, and we
5285 					 * have the strm interleave feature present. Then if we have
5286 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5287 					 * not started a delivery for this stream, we can take it.
5288 					 * Note we do NOT allow a notificaiton on the same assoc to
5289 					 * be delivered.
5290 					 */
5291 					control = ctl;
5292 					goto found_one;
5293 				}
5294 				ctl = TAILQ_NEXT(ctl, next);
5295 			}
5296 		}
5297 		/*
5298 		 * if we reach here, not suitable replacement is available
5299 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5300 		 * into the our held count, and its time to sleep again.
5301 		 */
5302 		held_length = so->so_rcv.sb_cc;
5303 		control->held_length = so->so_rcv.sb_cc;
5304 		goto restart;
5305 	}
5306 	/* Clear the held length since there is something to read */
5307 	control->held_length = 0;
5308 	if (hold_rlock) {
5309 		SCTP_INP_READ_UNLOCK(inp);
5310 		hold_rlock = 0;
5311 	}
5312 found_one:
5313 	/*
5314 	 * If we reach here, control has a some data for us to read off.
5315 	 * Note that stcb COULD be NULL.
5316 	 */
5317 	control->some_taken++;
5318 	if (hold_sblock) {
5319 		SOCKBUF_UNLOCK(&so->so_rcv);
5320 		hold_sblock = 0;
5321 	}
5322 	stcb = control->stcb;
5323 	if (stcb) {
5324 		if ((control->do_not_ref_stcb == 0) &&
5325 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5326 			if (freecnt_applied == 0)
5327 				stcb = NULL;
5328 		} else if (control->do_not_ref_stcb == 0) {
5329 			/* you can't free it on me please */
5330 			/*
5331 			 * The lock on the socket buffer protects us so the
5332 			 * free code will stop. But since we used the
5333 			 * socketbuf lock and the sender uses the tcb_lock
5334 			 * to increment, we need to use the atomic add to
5335 			 * the refcnt
5336 			 */
5337 			if (freecnt_applied) {
5338 #ifdef INVARIANTS
5339 				panic("refcnt already incremented");
5340 #else
5341 				printf("refcnt already incremented?\n");
5342 #endif
5343 			} else {
5344 				atomic_add_int(&stcb->asoc.refcnt, 1);
5345 				freecnt_applied = 1;
5346 			}
5347 			/*
5348 			 * Setup to remember how much we have not yet told
5349 			 * the peer our rwnd has opened up. Note we grab the
5350 			 * value from the tcb from last time. Note too that
5351 			 * sack sending clears this when a sack is sent,
5352 			 * which is fine. Once we hit the rwnd_req, we then
5353 			 * will go to the sctp_user_rcvd() that will not
5354 			 * lock until it KNOWs it MUST send a WUP-SACK.
5355 			 */
5356 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5357 			stcb->freed_by_sorcv_sincelast = 0;
5358 		}
5359 	}
5360 	if (stcb &&
5361 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5362 	    control->do_not_ref_stcb == 0) {
5363 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5364 	}
5365 	/* First lets get off the sinfo and sockaddr info */
5366 	if ((sinfo) && filling_sinfo) {
5367 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5368 		nxt = TAILQ_NEXT(control, next);
5369 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5370 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5371 			struct sctp_extrcvinfo *s_extra;
5372 
5373 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5374 			if ((nxt) &&
5375 			    (nxt->length)) {
5376 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5377 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5378 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5379 				}
5380 				if (nxt->spec_flags & M_NOTIFICATION) {
5381 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5382 				}
5383 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5384 				s_extra->sreinfo_next_length = nxt->length;
5385 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5386 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5387 				if (nxt->tail_mbuf != NULL) {
5388 					if (nxt->end_added) {
5389 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5390 					}
5391 				}
5392 			} else {
5393 				/*
5394 				 * we explicitly 0 this, since the memcpy
5395 				 * got some other things beyond the older
5396 				 * sinfo_ that is on the control's structure
5397 				 * :-D
5398 				 */
5399 				nxt = NULL;
5400 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5401 				s_extra->sreinfo_next_aid = 0;
5402 				s_extra->sreinfo_next_length = 0;
5403 				s_extra->sreinfo_next_ppid = 0;
5404 				s_extra->sreinfo_next_stream = 0;
5405 			}
5406 		}
5407 		/*
5408 		 * update off the real current cum-ack, if we have an stcb.
5409 		 */
5410 		if ((control->do_not_ref_stcb == 0) && stcb)
5411 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5412 		/*
5413 		 * mask off the high bits, we keep the actual chunk bits in
5414 		 * there.
5415 		 */
5416 		sinfo->sinfo_flags &= 0x00ff;
5417 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5418 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5419 		}
5420 	}
5421 #ifdef SCTP_ASOCLOG_OF_TSNS
5422 	{
5423 		int index, newindex;
5424 		struct sctp_pcbtsn_rlog *entry;
5425 
5426 		do {
5427 			index = inp->readlog_index;
5428 			newindex = index + 1;
5429 			if (newindex >= SCTP_READ_LOG_SIZE) {
5430 				newindex = 0;
5431 			}
5432 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5433 		entry = &inp->readlog[index];
5434 		entry->vtag = control->sinfo_assoc_id;
5435 		entry->strm = control->sinfo_stream;
5436 		entry->seq = control->sinfo_ssn;
5437 		entry->sz = control->length;
5438 		entry->flgs = control->sinfo_flags;
5439 	}
5440 #endif
5441 	if (fromlen && from) {
5442 		struct sockaddr *to;
5443 
5444 #ifdef INET
5445 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5446 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5447 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5448 #else
5449 		/* No AF_INET use AF_INET6 */
5450 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5451 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5452 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5453 #endif
5454 
5455 		to = from;
5456 #if defined(INET) && defined(INET6)
5457 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5458 		    (to->sa_family == AF_INET) &&
5459 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5460 			struct sockaddr_in *sin;
5461 			struct sockaddr_in6 sin6;
5462 
5463 			sin = (struct sockaddr_in *)to;
5464 			bzero(&sin6, sizeof(sin6));
5465 			sin6.sin6_family = AF_INET6;
5466 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5467 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5468 			bcopy(&sin->sin_addr,
5469 			    &sin6.sin6_addr.s6_addr32[3],
5470 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5471 			sin6.sin6_port = sin->sin_port;
5472 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5473 		}
5474 #endif
5475 #if defined(INET6)
5476 		{
5477 			struct sockaddr_in6 lsa6, *to6;
5478 
5479 			to6 = (struct sockaddr_in6 *)to;
5480 			sctp_recover_scope_mac(to6, (&lsa6));
5481 		}
5482 #endif
5483 	}
5484 	/* now copy out what data we can */
5485 	if (mp == NULL) {
5486 		/* copy out each mbuf in the chain up to length */
5487 get_more_data:
5488 		m = control->data;
5489 		while (m) {
5490 			/* Move out all we can */
5491 			cp_len = (int)uio->uio_resid;
5492 			my_len = (int)SCTP_BUF_LEN(m);
5493 			if (cp_len > my_len) {
5494 				/* not enough in this buf */
5495 				cp_len = my_len;
5496 			}
5497 			if (hold_rlock) {
5498 				SCTP_INP_READ_UNLOCK(inp);
5499 				hold_rlock = 0;
5500 			}
5501 			if (cp_len > 0)
5502 				error = uiomove(mtod(m, char *), cp_len, uio);
5503 			/* re-read */
5504 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5505 				goto release;
5506 			}
5507 			if ((control->do_not_ref_stcb == 0) && stcb &&
5508 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5509 				no_rcv_needed = 1;
5510 			}
5511 			if (error) {
5512 				/* error we are out of here */
5513 				goto release;
5514 			}
5515 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5516 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5517 			    ((control->end_added == 0) ||
5518 			    (control->end_added &&
5519 			    (TAILQ_NEXT(control, next) == NULL)))
5520 			    ) {
5521 				SCTP_INP_READ_LOCK(inp);
5522 				hold_rlock = 1;
5523 			}
5524 			if (cp_len == SCTP_BUF_LEN(m)) {
5525 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5526 				    (control->end_added)) {
5527 					out_flags |= MSG_EOR;
5528 					if ((control->do_not_ref_stcb == 0) &&
5529 					    (control->stcb != NULL) &&
5530 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5531 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5532 				}
5533 				if (control->spec_flags & M_NOTIFICATION) {
5534 					out_flags |= MSG_NOTIFICATION;
5535 				}
5536 				/* we ate up the mbuf */
5537 				if (in_flags & MSG_PEEK) {
5538 					/* just looking */
5539 					m = SCTP_BUF_NEXT(m);
5540 					copied_so_far += cp_len;
5541 				} else {
5542 					/* dispose of the mbuf */
5543 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5544 						sctp_sblog(&so->so_rcv,
5545 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5546 					}
5547 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5548 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5549 						sctp_sblog(&so->so_rcv,
5550 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5551 					}
5552 					copied_so_far += cp_len;
5553 					freed_so_far += cp_len;
5554 					freed_so_far += MSIZE;
5555 					atomic_subtract_int(&control->length, cp_len);
5556 					control->data = sctp_m_free(m);
5557 					m = control->data;
5558 					/*
5559 					 * been through it all, must hold sb
5560 					 * lock ok to null tail
5561 					 */
5562 					if (control->data == NULL) {
5563 #ifdef INVARIANTS
5564 						if ((control->end_added == 0) ||
5565 						    (TAILQ_NEXT(control, next) == NULL)) {
5566 							/*
5567 							 * If the end is not
5568 							 * added, OR the
5569 							 * next is NOT null
5570 							 * we MUST have the
5571 							 * lock.
5572 							 */
5573 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5574 								panic("Hmm we don't own the lock?");
5575 							}
5576 						}
5577 #endif
5578 						control->tail_mbuf = NULL;
5579 #ifdef INVARIANTS
5580 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5581 							panic("end_added, nothing left and no MSG_EOR");
5582 						}
5583 #endif
5584 					}
5585 				}
5586 			} else {
5587 				/* Do we need to trim the mbuf? */
5588 				if (control->spec_flags & M_NOTIFICATION) {
5589 					out_flags |= MSG_NOTIFICATION;
5590 				}
5591 				if ((in_flags & MSG_PEEK) == 0) {
5592 					SCTP_BUF_RESV_UF(m, cp_len);
5593 					SCTP_BUF_LEN(m) -= cp_len;
5594 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5595 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5596 					}
5597 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5598 					if ((control->do_not_ref_stcb == 0) &&
5599 					    stcb) {
5600 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5601 					}
5602 					copied_so_far += cp_len;
5603 					freed_so_far += cp_len;
5604 					freed_so_far += MSIZE;
5605 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5606 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5607 						    SCTP_LOG_SBRESULT, 0);
5608 					}
5609 					atomic_subtract_int(&control->length, cp_len);
5610 				} else {
5611 					copied_so_far += cp_len;
5612 				}
5613 			}
5614 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5615 				break;
5616 			}
5617 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5618 			    (control->do_not_ref_stcb == 0) &&
5619 			    (freed_so_far >= rwnd_req)) {
5620 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5621 			}
5622 		}		/* end while(m) */
5623 		/*
5624 		 * At this point we have looked at it all and we either have
5625 		 * a MSG_EOR/or read all the user wants... <OR>
5626 		 * control->length == 0.
5627 		 */
5628 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5629 			/* we are done with this control */
5630 			if (control->length == 0) {
5631 				if (control->data) {
5632 #ifdef INVARIANTS
5633 					panic("control->data not null at read eor?");
5634 #else
5635 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5636 					sctp_m_freem(control->data);
5637 					control->data = NULL;
5638 #endif
5639 				}
5640 		done_with_control:
5641 				if (TAILQ_NEXT(control, next) == NULL) {
5642 					/*
5643 					 * If we don't have a next we need a
5644 					 * lock, if there is a next
5645 					 * interrupt is filling ahead of us
5646 					 * and we don't need a lock to
5647 					 * remove this guy (which is the
5648 					 * head of the queue).
5649 					 */
5650 					if (hold_rlock == 0) {
5651 						SCTP_INP_READ_LOCK(inp);
5652 						hold_rlock = 1;
5653 					}
5654 				}
5655 				TAILQ_REMOVE(&inp->read_queue, control, next);
5656 				/* Add back any hiddend data */
5657 				if (control->held_length) {
5658 					held_length = 0;
5659 					control->held_length = 0;
5660 					wakeup_read_socket = 1;
5661 				}
5662 				if (control->aux_data) {
5663 					sctp_m_free(control->aux_data);
5664 					control->aux_data = NULL;
5665 				}
5666 				no_rcv_needed = control->do_not_ref_stcb;
5667 				sctp_free_remote_addr(control->whoFrom);
5668 				control->data = NULL;
5669 				sctp_free_a_readq(stcb, control);
5670 				control = NULL;
5671 				if ((freed_so_far >= rwnd_req) &&
5672 				    (no_rcv_needed == 0))
5673 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5674 
5675 			} else {
5676 				/*
5677 				 * The user did not read all of this
5678 				 * message, turn off the returned MSG_EOR
5679 				 * since we are leaving more behind on the
5680 				 * control to read.
5681 				 */
5682 #ifdef INVARIANTS
5683 				if (control->end_added &&
5684 				    (control->data == NULL) &&
5685 				    (control->tail_mbuf == NULL)) {
5686 					panic("Gak, control->length is corrupt?");
5687 				}
5688 #endif
5689 				no_rcv_needed = control->do_not_ref_stcb;
5690 				out_flags &= ~MSG_EOR;
5691 			}
5692 		}
5693 		if (out_flags & MSG_EOR) {
5694 			goto release;
5695 		}
5696 		if ((uio->uio_resid == 0) ||
5697 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5698 		    ) {
5699 			goto release;
5700 		}
5701 		/*
5702 		 * If I hit here the receiver wants more and this message is
5703 		 * NOT done (pd-api). So two questions. Can we block? if not
5704 		 * we are done. Did the user NOT set MSG_WAITALL?
5705 		 */
5706 		if (block_allowed == 0) {
5707 			goto release;
5708 		}
5709 		/*
5710 		 * We need to wait for more data a few things: - We don't
5711 		 * sbunlock() so we don't get someone else reading. - We
5712 		 * must be sure to account for the case where what is added
5713 		 * is NOT to our control when we wakeup.
5714 		 */
5715 
5716 		/*
5717 		 * Do we need to tell the transport a rwnd update might be
5718 		 * needed before we go to sleep?
5719 		 */
5720 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5721 		    ((freed_so_far >= rwnd_req) &&
5722 		    (control->do_not_ref_stcb == 0) &&
5723 		    (no_rcv_needed == 0))) {
5724 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5725 		}
5726 wait_some_more:
5727 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5728 			goto release;
5729 		}
5730 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5731 			goto release;
5732 
5733 		if (hold_rlock == 1) {
5734 			SCTP_INP_READ_UNLOCK(inp);
5735 			hold_rlock = 0;
5736 		}
5737 		if (hold_sblock == 0) {
5738 			SOCKBUF_LOCK(&so->so_rcv);
5739 			hold_sblock = 1;
5740 		}
5741 		if ((copied_so_far) && (control->length == 0) &&
5742 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5743 			goto release;
5744 		}
5745 		if (so->so_rcv.sb_cc <= control->held_length) {
5746 			error = sbwait(&so->so_rcv);
5747 			if (error) {
5748 				goto release;
5749 			}
5750 			control->held_length = 0;
5751 		}
5752 		if (hold_sblock) {
5753 			SOCKBUF_UNLOCK(&so->so_rcv);
5754 			hold_sblock = 0;
5755 		}
5756 		if (control->length == 0) {
5757 			/* still nothing here */
5758 			if (control->end_added == 1) {
5759 				/* he aborted, or is done i.e.did a shutdown */
5760 				out_flags |= MSG_EOR;
5761 				if (control->pdapi_aborted) {
5762 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5763 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5764 
5765 					out_flags |= MSG_TRUNC;
5766 				} else {
5767 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5768 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5769 				}
5770 				goto done_with_control;
5771 			}
5772 			if (so->so_rcv.sb_cc > held_length) {
5773 				control->held_length = so->so_rcv.sb_cc;
5774 				held_length = 0;
5775 			}
5776 			goto wait_some_more;
5777 		} else if (control->data == NULL) {
5778 			/*
5779 			 * we must re-sync since data is probably being
5780 			 * added
5781 			 */
5782 			SCTP_INP_READ_LOCK(inp);
5783 			if ((control->length > 0) && (control->data == NULL)) {
5784 				/*
5785 				 * big trouble.. we have the lock and its
5786 				 * corrupt?
5787 				 */
5788 #ifdef INVARIANTS
5789 				panic("Impossible data==NULL length !=0");
5790 #endif
5791 				out_flags |= MSG_EOR;
5792 				out_flags |= MSG_TRUNC;
5793 				control->length = 0;
5794 				SCTP_INP_READ_UNLOCK(inp);
5795 				goto done_with_control;
5796 			}
5797 			SCTP_INP_READ_UNLOCK(inp);
5798 			/* We will fall around to get more data */
5799 		}
5800 		goto get_more_data;
5801 	} else {
5802 		/*-
5803 		 * Give caller back the mbuf chain,
5804 		 * store in uio_resid the length
5805 		 */
5806 		wakeup_read_socket = 0;
5807 		if ((control->end_added == 0) ||
5808 		    (TAILQ_NEXT(control, next) == NULL)) {
5809 			/* Need to get rlock */
5810 			if (hold_rlock == 0) {
5811 				SCTP_INP_READ_LOCK(inp);
5812 				hold_rlock = 1;
5813 			}
5814 		}
5815 		if (control->end_added) {
5816 			out_flags |= MSG_EOR;
5817 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5818 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5819 		}
5820 		if (control->spec_flags & M_NOTIFICATION) {
5821 			out_flags |= MSG_NOTIFICATION;
5822 		}
5823 		uio->uio_resid = control->length;
5824 		*mp = control->data;
5825 		m = control->data;
5826 		while (m) {
5827 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5828 				sctp_sblog(&so->so_rcv,
5829 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5830 			}
5831 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5832 			freed_so_far += SCTP_BUF_LEN(m);
5833 			freed_so_far += MSIZE;
5834 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5835 				sctp_sblog(&so->so_rcv,
5836 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5837 			}
5838 			m = SCTP_BUF_NEXT(m);
5839 		}
5840 		control->data = control->tail_mbuf = NULL;
5841 		control->length = 0;
5842 		if (out_flags & MSG_EOR) {
5843 			/* Done with this control */
5844 			goto done_with_control;
5845 		}
5846 	}
5847 release:
5848 	if (hold_rlock == 1) {
5849 		SCTP_INP_READ_UNLOCK(inp);
5850 		hold_rlock = 0;
5851 	}
5852 	if (hold_sblock == 1) {
5853 		SOCKBUF_UNLOCK(&so->so_rcv);
5854 		hold_sblock = 0;
5855 	}
5856 	sbunlock(&so->so_rcv);
5857 	sockbuf_lock = 0;
5858 
5859 release_unlocked:
5860 	if (hold_sblock) {
5861 		SOCKBUF_UNLOCK(&so->so_rcv);
5862 		hold_sblock = 0;
5863 	}
5864 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5865 		if ((freed_so_far >= rwnd_req) &&
5866 		    (control && (control->do_not_ref_stcb == 0)) &&
5867 		    (no_rcv_needed == 0))
5868 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5869 	}
5870 out:
5871 	if (msg_flags) {
5872 		*msg_flags = out_flags;
5873 	}
5874 	if (((out_flags & MSG_EOR) == 0) &&
5875 	    ((in_flags & MSG_PEEK) == 0) &&
5876 	    (sinfo) &&
5877 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5878 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
5879 		struct sctp_extrcvinfo *s_extra;
5880 
5881 		s_extra = (struct sctp_extrcvinfo *)sinfo;
5882 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5883 	}
5884 	if (hold_rlock == 1) {
5885 		SCTP_INP_READ_UNLOCK(inp);
5886 		hold_rlock = 0;
5887 	}
5888 	if (hold_sblock) {
5889 		SOCKBUF_UNLOCK(&so->so_rcv);
5890 		hold_sblock = 0;
5891 	}
5892 	if (sockbuf_lock) {
5893 		sbunlock(&so->so_rcv);
5894 	}
5895 	if (freecnt_applied) {
5896 		/*
5897 		 * The lock on the socket buffer protects us so the free
5898 		 * code will stop. But since we used the socketbuf lock and
5899 		 * the sender uses the tcb_lock to increment, we need to use
5900 		 * the atomic add to the refcnt.
5901 		 */
5902 		if (stcb == NULL) {
5903 #ifdef INVARIANTS
5904 			panic("stcb for refcnt has gone NULL?");
5905 			goto stage_left;
5906 #else
5907 			goto stage_left;
5908 #endif
5909 		}
5910 		atomic_add_int(&stcb->asoc.refcnt, -1);
5911 		freecnt_applied = 0;
5912 		/* Save the value back for next time */
5913 		stcb->freed_by_sorcv_sincelast = freed_so_far;
5914 	}
5915 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5916 		if (stcb) {
5917 			sctp_misc_ints(SCTP_SORECV_DONE,
5918 			    freed_so_far,
5919 			    ((uio) ? (slen - uio->uio_resid) : slen),
5920 			    stcb->asoc.my_rwnd,
5921 			    so->so_rcv.sb_cc);
5922 		} else {
5923 			sctp_misc_ints(SCTP_SORECV_DONE,
5924 			    freed_so_far,
5925 			    ((uio) ? (slen - uio->uio_resid) : slen),
5926 			    0,
5927 			    so->so_rcv.sb_cc);
5928 		}
5929 	}
5930 stage_left:
5931 	if (wakeup_read_socket) {
5932 		sctp_sorwakeup(inp, so);
5933 	}
5934 	return (error);
5935 }
5936 
5937 
5938 #ifdef SCTP_MBUF_LOGGING
5939 struct mbuf *
5940 sctp_m_free(struct mbuf *m)
5941 {
5942 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5943 		if (SCTP_BUF_IS_EXTENDED(m)) {
5944 			sctp_log_mb(m, SCTP_MBUF_IFREE);
5945 		}
5946 	}
5947 	return (m_free(m));
5948 }
5949 
5950 void
5951 sctp_m_freem(struct mbuf *mb)
5952 {
5953 	while (mb != NULL)
5954 		mb = sctp_m_free(mb);
5955 }
5956 
5957 #endif
5958 
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 *
	 * The walk over the associations is performed asynchronously by
	 * the address work-queue iterator; this function only validates
	 * the address and queues a work item for it.
	 *
	 * Returns 0 on success, EADDRNOTAVAIL if the address is not a
	 * known local interface address, or ENOMEM if the work item
	 * cannot be allocated.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold the ifa until the iterator has consumed the work item. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6005 
6006 
6007 int
6008 sctp_soreceive(struct socket *so,
6009     struct sockaddr **psa,
6010     struct uio *uio,
6011     struct mbuf **mp0,
6012     struct mbuf **controlp,
6013     int *flagsp)
6014 {
6015 	int error, fromlen;
6016 	uint8_t sockbuf[256];
6017 	struct sockaddr *from;
6018 	struct sctp_extrcvinfo sinfo;
6019 	int filling_sinfo = 1;
6020 	struct sctp_inpcb *inp;
6021 
6022 	inp = (struct sctp_inpcb *)so->so_pcb;
6023 	/* pickup the assoc we are reading from */
6024 	if (inp == NULL) {
6025 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6026 		return (EINVAL);
6027 	}
6028 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6029 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6030 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6031 	    (controlp == NULL)) {
6032 		/* user does not want the sndrcv ctl */
6033 		filling_sinfo = 0;
6034 	}
6035 	if (psa) {
6036 		from = (struct sockaddr *)sockbuf;
6037 		fromlen = sizeof(sockbuf);
6038 		from->sa_len = 0;
6039 	} else {
6040 		from = NULL;
6041 		fromlen = 0;
6042 	}
6043 
6044 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6045 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6046 	if ((controlp) && (filling_sinfo)) {
6047 		/* copy back the sinfo in a CMSG format */
6048 		if (filling_sinfo)
6049 			*controlp = sctp_build_ctl_nchunk(inp,
6050 			    (struct sctp_sndrcvinfo *)&sinfo);
6051 		else
6052 			*controlp = NULL;
6053 	}
6054 	if (psa) {
6055 		/* copy back the address info */
6056 		if (from && from->sa_len) {
6057 			*psa = sodupsockaddr(from, M_NOWAIT);
6058 		} else {
6059 			*psa = NULL;
6060 		}
6061 	}
6062 	return (error);
6063 }
6064 
6065 
6066 
6067 
6068 
/*
 * Add the packed array of sockaddrs at "addr" (totaddr entries laid out
 * back-to-back, each sized by its address family) to the association
 * "stcb" as confirmed remote addresses.  Returns the number of addresses
 * added.  On a bad or unaddable address, *error is set and the
 * association has been freed via sctp_free_assoc() -- the caller must
 * not touch stcb afterwards.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast peers */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast peers */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): unknown families are skipped, but
			 * incr keeps its previous value (0 on the first
			 * entry), so sa may not advance -- presumably the
			 * caller has already validated every family.
			 */
			break;
		}
		/* step to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6144 
/*
 * Scan the packed sockaddr array for connectx(): count the IPv4/IPv6
 * entries into *num_v4/*num_v6, validate each sa_len, and return an
 * existing association if one is already established (or being set up)
 * for any of the addresses.  On a malformed address, *error and
 * *bad_addr are set and NULL is returned.  *totaddr is trimmed to the
 * number of entries actually examined (stops at an unknown family or
 * when "limit" bytes would be exceeded).  Returns NULL when no existing
 * association was found.
 */
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    int *totaddr, int *num_v4, int *num_v6, int *error,
    int limit, int *bad_addr)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	size_t incr, at, i;

	at = incr = 0;
	sa = addr;

	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < (size_t)*totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			(*num_v4) += 1;
			incr = sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			break;
#endif
#ifdef INET6
		case AF_INET6:
			{
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)sa;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					/* Must be non-mapped for connectx */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				(*num_v6) += 1;
				incr = sizeof(struct sockaddr_in6);
				if (sa->sa_len != incr) {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				break;
			}
#endif
		default:
			/* unknown family: truncate the scan here */
			*totaddr = i;
			/* we are done */
			break;
		}
		if (i == (size_t)*totaddr) {
			break;
		}
		/*
		 * Hold a ref across the lookup; sctp_findassociation_ep_addr
		 * consumes it on success, otherwise we drop it ourselves.
		 */
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* Already have or am bring up an association */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		if ((at + incr) > (size_t)limit) {
			/* next entry would run past the supplied buffer */
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}
6221 
6222 /*
6223  * sctp_bindx(ADD) for one address.
6224  * assumes all arguments are valid/checked by caller.
6225  */
6226 void
6227 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6228     struct sockaddr *sa, sctp_assoc_t assoc_id,
6229     uint32_t vrf_id, int *error, void *p)
6230 {
6231 	struct sockaddr *addr_touse;
6232 
6233 #ifdef INET6
6234 	struct sockaddr_in sin;
6235 
6236 #endif
6237 
6238 	/* see if we're bound all already! */
6239 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6240 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6241 		*error = EINVAL;
6242 		return;
6243 	}
6244 	addr_touse = sa;
6245 #ifdef INET6
6246 	if (sa->sa_family == AF_INET6) {
6247 		struct sockaddr_in6 *sin6;
6248 
6249 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6250 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6251 			*error = EINVAL;
6252 			return;
6253 		}
6254 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6255 			/* can only bind v6 on PF_INET6 sockets */
6256 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6257 			*error = EINVAL;
6258 			return;
6259 		}
6260 		sin6 = (struct sockaddr_in6 *)addr_touse;
6261 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6262 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6263 			    SCTP_IPV6_V6ONLY(inp)) {
6264 				/* can't bind v4-mapped on PF_INET sockets */
6265 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6266 				*error = EINVAL;
6267 				return;
6268 			}
6269 			in6_sin6_2_sin(&sin, sin6);
6270 			addr_touse = (struct sockaddr *)&sin;
6271 		}
6272 	}
6273 #endif
6274 #ifdef INET
6275 	if (sa->sa_family == AF_INET) {
6276 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6277 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6278 			*error = EINVAL;
6279 			return;
6280 		}
6281 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6282 		    SCTP_IPV6_V6ONLY(inp)) {
6283 			/* can't bind v4 on PF_INET sockets */
6284 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6285 			*error = EINVAL;
6286 			return;
6287 		}
6288 	}
6289 #endif
6290 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6291 		if (p == NULL) {
6292 			/* Can't get proc for Net/Open BSD */
6293 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6294 			*error = EINVAL;
6295 			return;
6296 		}
6297 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6298 		return;
6299 	}
6300 	/*
6301 	 * No locks required here since bind and mgmt_ep_sa all do their own
6302 	 * locking. If we do something for the FIX: below we may need to
6303 	 * lock in that case.
6304 	 */
6305 	if (assoc_id == 0) {
6306 		/* add the address */
6307 		struct sctp_inpcb *lep;
6308 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6309 
6310 		/* validate the incoming port */
6311 		if ((lsin->sin_port != 0) &&
6312 		    (lsin->sin_port != inp->sctp_lport)) {
6313 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6314 			*error = EINVAL;
6315 			return;
6316 		} else {
6317 			/* user specified 0 port, set it to existing port */
6318 			lsin->sin_port = inp->sctp_lport;
6319 		}
6320 
6321 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6322 		if (lep != NULL) {
6323 			/*
6324 			 * We must decrement the refcount since we have the
6325 			 * ep already and are binding. No remove going on
6326 			 * here.
6327 			 */
6328 			SCTP_INP_DECR_REF(lep);
6329 		}
6330 		if (lep == inp) {
6331 			/* already bound to it.. ok */
6332 			return;
6333 		} else if (lep == NULL) {
6334 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6335 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6336 			    SCTP_ADD_IP_ADDRESS,
6337 			    vrf_id, NULL);
6338 		} else {
6339 			*error = EADDRINUSE;
6340 		}
6341 		if (*error)
6342 			return;
6343 	} else {
6344 		/*
6345 		 * FIX: decide whether we allow assoc based bindx
6346 		 */
6347 	}
6348 }
6349 
6350 /*
6351  * sctp_bindx(DELETE) for one address.
6352  * assumes all arguments are valid/checked by caller.
6353  */
6354 void
6355 sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
6356     struct sockaddr *sa, sctp_assoc_t assoc_id,
6357     uint32_t vrf_id, int *error)
6358 {
6359 	struct sockaddr *addr_touse;
6360 
6361 #ifdef INET6
6362 	struct sockaddr_in sin;
6363 
6364 #endif
6365 
6366 	/* see if we're bound all already! */
6367 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6368 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6369 		*error = EINVAL;
6370 		return;
6371 	}
6372 	addr_touse = sa;
6373 #if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
6374 	if (sa->sa_family == AF_INET6) {
6375 		struct sockaddr_in6 *sin6;
6376 
6377 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6378 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6379 			*error = EINVAL;
6380 			return;
6381 		}
6382 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6383 			/* can only bind v6 on PF_INET6 sockets */
6384 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6385 			*error = EINVAL;
6386 			return;
6387 		}
6388 		sin6 = (struct sockaddr_in6 *)addr_touse;
6389 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6390 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6391 			    SCTP_IPV6_V6ONLY(inp)) {
6392 				/* can't bind mapped-v4 on PF_INET sockets */
6393 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6394 				*error = EINVAL;
6395 				return;
6396 			}
6397 			in6_sin6_2_sin(&sin, sin6);
6398 			addr_touse = (struct sockaddr *)&sin;
6399 		}
6400 	}
6401 #endif
6402 #ifdef INET
6403 	if (sa->sa_family == AF_INET) {
6404 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6405 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6406 			*error = EINVAL;
6407 			return;
6408 		}
6409 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6410 		    SCTP_IPV6_V6ONLY(inp)) {
6411 			/* can't bind v4 on PF_INET sockets */
6412 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6413 			*error = EINVAL;
6414 			return;
6415 		}
6416 	}
6417 #endif
6418 	/*
6419 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6420 	 * below is ever changed we may need to lock before calling
6421 	 * association level binding.
6422 	 */
6423 	if (assoc_id == 0) {
6424 		/* delete the address */
6425 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6426 		    SCTP_DEL_IP_ADDRESS,
6427 		    vrf_id, NULL);
6428 	} else {
6429 		/*
6430 		 * FIX: decide whether we allow assoc based bindx
6431 		 */
6432 	}
6433 }
6434 
6435 /*
6436  * returns the valid local address count for an assoc, taking into account
6437  * all scoping rules
6438  */
6439 int
6440 sctp_local_addr_count(struct sctp_tcb *stcb)
6441 {
6442 	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
6443 	int ipv4_addr_legal, ipv6_addr_legal;
6444 	struct sctp_vrf *vrf;
6445 	struct sctp_ifn *sctp_ifn;
6446 	struct sctp_ifa *sctp_ifa;
6447 	int count = 0;
6448 
6449 	/* Turn on all the appropriate scopes */
6450 	loopback_scope = stcb->asoc.loopback_scope;
6451 	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
6452 	local_scope = stcb->asoc.local_scope;
6453 	site_scope = stcb->asoc.site_scope;
6454 	ipv4_addr_legal = ipv6_addr_legal = 0;
6455 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6456 		ipv6_addr_legal = 1;
6457 		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
6458 			ipv4_addr_legal = 1;
6459 		}
6460 	} else {
6461 		ipv4_addr_legal = 1;
6462 	}
6463 
6464 	SCTP_IPI_ADDR_RLOCK();
6465 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6466 	if (vrf == NULL) {
6467 		/* no vrf, no addresses */
6468 		SCTP_IPI_ADDR_RUNLOCK();
6469 		return (0);
6470 	}
6471 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6472 		/*
6473 		 * bound all case: go through all ifns on the vrf
6474 		 */
6475 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6476 			if ((loopback_scope == 0) &&
6477 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6478 				continue;
6479 			}
6480 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6481 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6482 					continue;
6483 				switch (sctp_ifa->address.sa.sa_family) {
6484 #ifdef INET
6485 				case AF_INET:
6486 					if (ipv4_addr_legal) {
6487 						struct sockaddr_in *sin;
6488 
6489 						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
6490 						if (sin->sin_addr.s_addr == 0) {
6491 							/*
6492 							 * skip unspecified
6493 							 * addrs
6494 							 */
6495 							continue;
6496 						}
6497 						if ((ipv4_local_scope == 0) &&
6498 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6499 							continue;
6500 						}
6501 						/* count this one */
6502 						count++;
6503 					} else {
6504 						continue;
6505 					}
6506 					break;
6507 #endif
6508 #ifdef INET6
6509 				case AF_INET6:
6510 					if (ipv6_addr_legal) {
6511 						struct sockaddr_in6 *sin6;
6512 
6513 						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
6514 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6515 							continue;
6516 						}
6517 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6518 							if (local_scope == 0)
6519 								continue;
6520 							if (sin6->sin6_scope_id == 0) {
6521 								if (sa6_recoverscope(sin6) != 0)
6522 									/*
6523 									 *
6524 									 * bad
6525 									 *
6526 									 * li
6527 									 * nk
6528 									 *
6529 									 * loc
6530 									 * al
6531 									 *
6532 									 * add
6533 									 * re
6534 									 * ss
6535 									 * */
6536 									continue;
6537 							}
6538 						}
6539 						if ((site_scope == 0) &&
6540 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6541 							continue;
6542 						}
6543 						/* count this one */
6544 						count++;
6545 					}
6546 					break;
6547 #endif
6548 				default:
6549 					/* TSNH */
6550 					break;
6551 				}
6552 			}
6553 		}
6554 	} else {
6555 		/*
6556 		 * subset bound case
6557 		 */
6558 		struct sctp_laddr *laddr;
6559 
6560 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6561 		    sctp_nxt_addr) {
6562 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6563 				continue;
6564 			}
6565 			/* count this one */
6566 			count++;
6567 		}
6568 	}
6569 	SCTP_IPI_ADDR_RUNLOCK();
6570 	return (count);
6571 }
6572 
6573 #if defined(SCTP_LOCAL_TRACE_BUF)
6574 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Record one entry in the global lock-free trace ring buffer.
	 * The slot index is claimed with a compare-and-set loop so
	 * concurrent tracers never share a slot; the index wraps from
	 * SCTP_MAX_LOGGING_SIZE back to the start of the buffer.
	 */
	uint32_t saveindex, newindex;

	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* map the wrap sentinel back to slot 0 */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6600 
6601 #endif
6602 /* XXX: Remove the #ifdef after tunneling over IPv6 works also on FreeBSD. */
6603 #ifdef INET
6604 /* We will need to add support
6605  * to bind the ports and such here
6606  * so we can do UDP tunneling. In
6607  * the mean-time, we return error
6608  */
6609 #include <netinet/udp.h>
6610 #include <netinet/udp_var.h>
6611 #include <sys/proc.h>
6612 #ifdef INET6
6613 #include <netinet6/sctp6_var.h>
6614 #endif
6615 
6616 static void
6617 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6618 {
6619 	struct ip *iph;
6620 	struct mbuf *sp, *last;
6621 	struct udphdr *uhdr;
6622 	uint16_t port = 0;
6623 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6624 
6625 	/*
6626 	 * Split out the mbuf chain. Leave the IP header in m, place the
6627 	 * rest in the sp.
6628 	 */
6629 	if ((m->m_flags & M_PKTHDR) == 0) {
6630 		/* Can't handle one that is not a pkt hdr */
6631 		goto out;
6632 	}
6633 	/* pull the src port */
6634 	iph = mtod(m, struct ip *);
6635 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6636 
6637 	port = uhdr->uh_sport;
6638 	sp = m_split(m, off, M_DONTWAIT);
6639 	if (sp == NULL) {
6640 		/* Gak, drop packet, we can't do a split */
6641 		goto out;
6642 	}
6643 	if (sp->m_pkthdr.len < header_size) {
6644 		/* Gak, packet can't have an SCTP header in it - to small */
6645 		m_freem(sp);
6646 		goto out;
6647 	}
6648 	/* ok now pull up the UDP header and SCTP header together */
6649 	sp = m_pullup(sp, header_size);
6650 	if (sp == NULL) {
6651 		/* Gak pullup failed */
6652 		goto out;
6653 	}
6654 	/* trim out the UDP header */
6655 	m_adj(sp, sizeof(struct udphdr));
6656 
6657 	/* Now reconstruct the mbuf chain */
6658 	/* 1) find last one */
6659 	last = m;
6660 	while (last->m_next != NULL) {
6661 		last = last->m_next;
6662 	}
6663 	last->m_next = sp;
6664 	m->m_pkthdr.len += sp->m_pkthdr.len;
6665 	last = m;
6666 	while (last != NULL) {
6667 		last = last->m_next;
6668 	}
6669 	/* Now its ready for sctp_input or sctp6_input */
6670 	iph = mtod(m, struct ip *);
6671 	switch (iph->ip_v) {
6672 #ifdef INET
6673 	case IPVERSION:
6674 		{
6675 			uint16_t len;
6676 
6677 			/* its IPv4 */
6678 			len = SCTP_GET_IPV4_LENGTH(iph);
6679 			len -= sizeof(struct udphdr);
6680 			SCTP_GET_IPV4_LENGTH(iph) = len;
6681 			sctp_input_with_port(m, off, port);
6682 			break;
6683 		}
6684 #endif
6685 #ifdef INET6
6686 	case IPV6_VERSION >> 4:
6687 		{
6688 			/* its IPv6 - NOT supported */
6689 			goto out;
6690 			break;
6691 
6692 		}
6693 #endif
6694 	default:
6695 		{
6696 			m_freem(m);
6697 			break;
6698 		}
6699 	}
6700 	return;
6701 out:
6702 	m_freem(m);
6703 }
6704 
6705 void
6706 sctp_over_udp_stop(void)
6707 {
6708 	struct socket *sop;
6709 
6710 	/*
6711 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6712 	 * for writting!
6713 	 */
6714 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6715 		/* Nothing to do */
6716 		return;
6717 	}
6718 	sop = SCTP_BASE_INFO(udp_tun_socket);
6719 	soclose(sop);
6720 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6721 }
6722 
int
sctp_over_udp_start(void)
{
	/*
	 * Create and bind the kernel UDP socket used for SCTP-over-UDP
	 * tunneling on the sysctl-configured port, and install
	 * sctp_recv_udp_tunneled_packet() as its input hook.  Returns 0
	 * on success or an errno value (EINVAL if no port is configured,
	 * EALREADY if already running, or a socreate/sobind error).
	 */
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		/* jumps into the error branch below to close the socket */
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up we can't get the port */
exit_stage_left:
		/* sctp_over_udp_stop() closes the socket and clears the global */
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_udp_tunneled_packet().
	 */
	return (0);
}
6776 
6777 #endif
6778