xref: /freebsd/sys/netinet/sctputil.c (revision 30d239bc4c510432e65a84fa1c14ed67a3ab1c92)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_crc32.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_cc_functions.h>
53 
54 #define NUMBER_OF_MTU_SIZES 18
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 void
62 sctp_sblog(struct sockbuf *sb,
63     struct sctp_tcb *stcb, int from, int incr)
64 {
65 	struct sctp_cwnd_log sctp_clog;
66 
67 	sctp_clog.x.sb.stcb = stcb;
68 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
69 	if (stcb)
70 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
71 	else
72 		sctp_clog.x.sb.stcb_sbcc = 0;
73 	sctp_clog.x.sb.incr = incr;
74 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
75 	    SCTP_LOG_EVENT_SB,
76 	    from,
77 	    sctp_clog.x.misc.log1,
78 	    sctp_clog.x.misc.log2,
79 	    sctp_clog.x.misc.log3,
80 	    sctp_clog.x.misc.log4);
81 }
82 
83 void
84 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
85 {
86 	struct sctp_cwnd_log sctp_clog;
87 
88 	sctp_clog.x.close.inp = (void *)inp;
89 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
90 	if (stcb) {
91 		sctp_clog.x.close.stcb = (void *)stcb;
92 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
93 	} else {
94 		sctp_clog.x.close.stcb = 0;
95 		sctp_clog.x.close.state = 0;
96 	}
97 	sctp_clog.x.close.loc = loc;
98 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
99 	    SCTP_LOG_EVENT_CLOSE,
100 	    0,
101 	    sctp_clog.x.misc.log1,
102 	    sctp_clog.x.misc.log2,
103 	    sctp_clog.x.misc.log3,
104 	    sctp_clog.x.misc.log4);
105 }
106 
107 
108 void
109 rto_logging(struct sctp_nets *net, int from)
110 {
111 	struct sctp_cwnd_log sctp_clog;
112 
113 	sctp_clog.x.rto.net = (void *)net;
114 	sctp_clog.x.rto.rtt = net->prev_rtt;
115 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
116 	    SCTP_LOG_EVENT_RTT,
117 	    from,
118 	    sctp_clog.x.misc.log1,
119 	    sctp_clog.x.misc.log2,
120 	    sctp_clog.x.misc.log3,
121 	    sctp_clog.x.misc.log4);
122 
123 }
124 
125 void
126 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
127 {
128 	struct sctp_cwnd_log sctp_clog;
129 
130 	sctp_clog.x.strlog.stcb = stcb;
131 	sctp_clog.x.strlog.n_tsn = tsn;
132 	sctp_clog.x.strlog.n_sseq = sseq;
133 	sctp_clog.x.strlog.e_tsn = 0;
134 	sctp_clog.x.strlog.e_sseq = 0;
135 	sctp_clog.x.strlog.strm = stream;
136 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
137 	    SCTP_LOG_EVENT_STRM,
138 	    from,
139 	    sctp_clog.x.misc.log1,
140 	    sctp_clog.x.misc.log2,
141 	    sctp_clog.x.misc.log3,
142 	    sctp_clog.x.misc.log4);
143 
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 
166 void
167 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
168 {
169 	struct sctp_cwnd_log sctp_clog;
170 
171 	sctp_clog.x.sack.cumack = cumack;
172 	sctp_clog.x.sack.oldcumack = old_cumack;
173 	sctp_clog.x.sack.tsn = tsn;
174 	sctp_clog.x.sack.numGaps = gaps;
175 	sctp_clog.x.sack.numDups = dups;
176 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
177 	    SCTP_LOG_EVENT_SACK,
178 	    from,
179 	    sctp_clog.x.misc.log1,
180 	    sctp_clog.x.misc.log2,
181 	    sctp_clog.x.misc.log3,
182 	    sctp_clog.x.misc.log4);
183 }
184 
185 void
186 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
187 {
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
204     int from)
205 {
206 	struct sctp_cwnd_log sctp_clog;
207 
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 
219 }
220 
221 
222 void
223 sctp_log_mb(struct mbuf *m, int from)
224 {
225 	struct sctp_cwnd_log sctp_clog;
226 
227 	sctp_clog.x.mb.mp = m;
228 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
229 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
230 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
231 	if (SCTP_BUF_IS_EXTENDED(m)) {
232 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
233 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
234 	} else {
235 		sctp_clog.x.mb.ext = 0;
236 		sctp_clog.x.mb.refcnt = 0;
237 	}
238 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
239 	    SCTP_LOG_EVENT_MBUF,
240 	    from,
241 	    sctp_clog.x.misc.log1,
242 	    sctp_clog.x.misc.log2,
243 	    sctp_clog.x.misc.log3,
244 	    sctp_clog.x.misc.log4);
245 }
246 
247 
248 void
249 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
250     int from)
251 {
252 	struct sctp_cwnd_log sctp_clog;
253 
254 	if (control == NULL) {
255 		SCTP_PRINTF("Gak log of NULL?\n");
256 		return;
257 	}
258 	sctp_clog.x.strlog.stcb = control->stcb;
259 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
260 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
261 	sctp_clog.x.strlog.strm = control->sinfo_stream;
262 	if (poschk != NULL) {
263 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
264 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
265 	} else {
266 		sctp_clog.x.strlog.e_tsn = 0;
267 		sctp_clog.x.strlog.e_sseq = 0;
268 	}
269 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
270 	    SCTP_LOG_EVENT_STRM,
271 	    from,
272 	    sctp_clog.x.misc.log1,
273 	    sctp_clog.x.misc.log2,
274 	    sctp_clog.x.misc.log3,
275 	    sctp_clog.x.misc.log4);
276 
277 }
278 
279 void
280 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
281 {
282 	struct sctp_cwnd_log sctp_clog;
283 
284 	sctp_clog.x.cwnd.net = net;
285 	if (stcb->asoc.send_queue_cnt > 255)
286 		sctp_clog.x.cwnd.cnt_in_send = 255;
287 	else
288 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
289 	if (stcb->asoc.stream_queue_cnt > 255)
290 		sctp_clog.x.cwnd.cnt_in_str = 255;
291 	else
292 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
293 
294 	if (net) {
295 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
296 		sctp_clog.x.cwnd.inflight = net->flight_size;
297 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
298 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
299 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
300 	}
301 	if (SCTP_CWNDLOG_PRESEND == from) {
302 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
303 	}
304 	sctp_clog.x.cwnd.cwnd_augment = augment;
305 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
306 	    SCTP_LOG_EVENT_CWND,
307 	    from,
308 	    sctp_clog.x.misc.log1,
309 	    sctp_clog.x.misc.log2,
310 	    sctp_clog.x.misc.log3,
311 	    sctp_clog.x.misc.log4);
312 
313 }
314 
315 void
316 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
317 {
318 	struct sctp_cwnd_log sctp_clog;
319 
320 	if (inp) {
321 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
322 
323 	} else {
324 		sctp_clog.x.lock.sock = (void *)NULL;
325 	}
326 	sctp_clog.x.lock.inp = (void *)inp;
327 	if (stcb) {
328 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
329 	} else {
330 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
331 	}
332 	if (inp) {
333 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
334 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
335 	} else {
336 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
337 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
338 	}
339 	sctp_clog.x.lock.info_lock = rw_wowned(&sctppcbinfo.ipi_ep_mtx);
340 	if (inp->sctp_socket) {
341 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
342 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
343 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
344 	} else {
345 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
346 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
347 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
348 	}
349 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
350 	    SCTP_LOG_LOCK_EVENT,
351 	    from,
352 	    sctp_clog.x.misc.log1,
353 	    sctp_clog.x.misc.log2,
354 	    sctp_clog.x.misc.log3,
355 	    sctp_clog.x.misc.log4);
356 
357 }
358 
359 void
360 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
361 {
362 	struct sctp_cwnd_log sctp_clog;
363 
364 	sctp_clog.x.cwnd.net = net;
365 	sctp_clog.x.cwnd.cwnd_new_value = error;
366 	sctp_clog.x.cwnd.inflight = net->flight_size;
367 	sctp_clog.x.cwnd.cwnd_augment = burst;
368 	if (stcb->asoc.send_queue_cnt > 255)
369 		sctp_clog.x.cwnd.cnt_in_send = 255;
370 	else
371 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
372 	if (stcb->asoc.stream_queue_cnt > 255)
373 		sctp_clog.x.cwnd.cnt_in_str = 255;
374 	else
375 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
376 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
377 	    SCTP_LOG_EVENT_MAXBURST,
378 	    from,
379 	    sctp_clog.x.misc.log1,
380 	    sctp_clog.x.misc.log2,
381 	    sctp_clog.x.misc.log3,
382 	    sctp_clog.x.misc.log4);
383 
384 }
385 
386 void
387 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
388 {
389 	struct sctp_cwnd_log sctp_clog;
390 
391 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
392 	sctp_clog.x.rwnd.send_size = snd_size;
393 	sctp_clog.x.rwnd.overhead = overhead;
394 	sctp_clog.x.rwnd.new_rwnd = 0;
395 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
396 	    SCTP_LOG_EVENT_RWND,
397 	    from,
398 	    sctp_clog.x.misc.log1,
399 	    sctp_clog.x.misc.log2,
400 	    sctp_clog.x.misc.log3,
401 	    sctp_clog.x.misc.log4);
402 }
403 
404 void
405 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
406 {
407 	struct sctp_cwnd_log sctp_clog;
408 
409 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
410 	sctp_clog.x.rwnd.send_size = flight_size;
411 	sctp_clog.x.rwnd.overhead = overhead;
412 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
413 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
414 	    SCTP_LOG_EVENT_RWND,
415 	    from,
416 	    sctp_clog.x.misc.log1,
417 	    sctp_clog.x.misc.log2,
418 	    sctp_clog.x.misc.log3,
419 	    sctp_clog.x.misc.log4);
420 }
421 
422 void
423 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
424 {
425 	struct sctp_cwnd_log sctp_clog;
426 
427 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
428 	sctp_clog.x.mbcnt.size_change = book;
429 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
430 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
431 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
432 	    SCTP_LOG_EVENT_MBCNT,
433 	    from,
434 	    sctp_clog.x.misc.log1,
435 	    sctp_clog.x.misc.log2,
436 	    sctp_clog.x.misc.log3,
437 	    sctp_clog.x.misc.log4);
438 
439 }
440 
/*
 * Emit four caller-supplied words to the KTR trace as a generic
 * "misc" event; from identifies the call site.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
449 
450 void
451 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
452 {
453 	struct sctp_cwnd_log sctp_clog;
454 
455 	sctp_clog.x.wake.stcb = (void *)stcb;
456 	sctp_clog.x.wake.wake_cnt = wake_cnt;
457 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
458 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
459 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
460 
461 	if (stcb->asoc.stream_queue_cnt < 0xff)
462 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
463 	else
464 		sctp_clog.x.wake.stream_qcnt = 0xff;
465 
466 	if (stcb->asoc.chunks_on_out_queue < 0xff)
467 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
468 	else
469 		sctp_clog.x.wake.chunks_on_oque = 0xff;
470 
471 	sctp_clog.x.wake.sctpflags = 0;
472 	/* set in the defered mode stuff */
473 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
474 		sctp_clog.x.wake.sctpflags |= 1;
475 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
476 		sctp_clog.x.wake.sctpflags |= 2;
477 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
478 		sctp_clog.x.wake.sctpflags |= 4;
479 	/* what about the sb */
480 	if (stcb->sctp_socket) {
481 		struct socket *so = stcb->sctp_socket;
482 
483 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
484 	} else {
485 		sctp_clog.x.wake.sbflags = 0xff;
486 	}
487 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
488 	    SCTP_LOG_EVENT_WAKE,
489 	    from,
490 	    sctp_clog.x.misc.log1,
491 	    sctp_clog.x.misc.log2,
492 	    sctp_clog.x.misc.log3,
493 	    sctp_clog.x.misc.log4);
494 
495 }
496 
497 void
498 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
499 {
500 	struct sctp_cwnd_log sctp_clog;
501 
502 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
503 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
504 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
505 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
506 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
507 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
508 	sctp_clog.x.blk.sndlen = sendlen;
509 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
510 	    SCTP_LOG_EVENT_BLOCK,
511 	    from,
512 	    sctp_clog.x.misc.log1,
513 	    sctp_clog.x.misc.log2,
514 	    sctp_clog.x.misc.log3,
515 	    sctp_clog.x.misc.log4);
516 
517 }
518 
/*
 * Placeholder for extracting the trace log via a socket option.
 * Always succeeds without copying anything; the KTR trace is expected
 * to be read with ktrdump instead.  Returns 0.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
525 
526 #ifdef SCTP_AUDITING_ENABLED
/* Circular trace of (event, detail) byte pairs; see sctp_audit_log(). */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next write slot in sctp_audit_data; wraps at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
529 
530 static
531 void
532 sctp_print_audit_report(void)
533 {
534 	int i;
535 	int cnt;
536 
537 	cnt = 0;
538 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
539 		if ((sctp_audit_data[i][0] == 0xe0) &&
540 		    (sctp_audit_data[i][1] == 0x01)) {
541 			cnt = 0;
542 			SCTP_PRINTF("\n");
543 		} else if (sctp_audit_data[i][0] == 0xf0) {
544 			cnt = 0;
545 			SCTP_PRINTF("\n");
546 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
547 		    (sctp_audit_data[i][1] == 0x01)) {
548 			SCTP_PRINTF("\n");
549 			cnt = 0;
550 		}
551 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
552 		    (uint32_t) sctp_audit_data[i][1]);
553 		cnt++;
554 		if ((cnt % 14) == 0)
555 			SCTP_PRINTF("\n");
556 	}
557 	for (i = 0; i < sctp_audit_indx; i++) {
558 		if ((sctp_audit_data[i][0] == 0xe0) &&
559 		    (sctp_audit_data[i][1] == 0x01)) {
560 			cnt = 0;
561 			SCTP_PRINTF("\n");
562 		} else if (sctp_audit_data[i][0] == 0xf0) {
563 			cnt = 0;
564 			SCTP_PRINTF("\n");
565 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
566 		    (sctp_audit_data[i][1] == 0x01)) {
567 			SCTP_PRINTF("\n");
568 			cnt = 0;
569 		}
570 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
571 		    (uint32_t) sctp_audit_data[i][1]);
572 		cnt++;
573 		if ((cnt % 14) == 0)
574 			SCTP_PRINTF("\n");
575 	}
576 	SCTP_PRINTF("\n");
577 }
578 
579 void
580 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
581     struct sctp_nets *net)
582 {
583 	int resend_cnt, tot_out, rep, tot_book_cnt;
584 	struct sctp_nets *lnet;
585 	struct sctp_tmit_chunk *chk;
586 
587 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
588 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
589 	sctp_audit_indx++;
590 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
591 		sctp_audit_indx = 0;
592 	}
593 	if (inp == NULL) {
594 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
595 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
596 		sctp_audit_indx++;
597 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
598 			sctp_audit_indx = 0;
599 		}
600 		return;
601 	}
602 	if (stcb == NULL) {
603 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
604 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
605 		sctp_audit_indx++;
606 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
607 			sctp_audit_indx = 0;
608 		}
609 		return;
610 	}
611 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
612 	sctp_audit_data[sctp_audit_indx][1] =
613 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
614 	sctp_audit_indx++;
615 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
616 		sctp_audit_indx = 0;
617 	}
618 	rep = 0;
619 	tot_book_cnt = 0;
620 	resend_cnt = tot_out = 0;
621 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
622 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
623 			resend_cnt++;
624 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
625 			tot_out += chk->book_size;
626 			tot_book_cnt++;
627 		}
628 	}
629 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
630 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
631 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
632 		sctp_audit_indx++;
633 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
634 			sctp_audit_indx = 0;
635 		}
636 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
637 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
638 		rep = 1;
639 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
640 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
641 		sctp_audit_data[sctp_audit_indx][1] =
642 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
643 		sctp_audit_indx++;
644 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
645 			sctp_audit_indx = 0;
646 		}
647 	}
648 	if (tot_out != stcb->asoc.total_flight) {
649 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
650 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
651 		sctp_audit_indx++;
652 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
653 			sctp_audit_indx = 0;
654 		}
655 		rep = 1;
656 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
657 		    (int)stcb->asoc.total_flight);
658 		stcb->asoc.total_flight = tot_out;
659 	}
660 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
661 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
662 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
663 		sctp_audit_indx++;
664 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
665 			sctp_audit_indx = 0;
666 		}
667 		rep = 1;
668 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
669 
670 		stcb->asoc.total_flight_count = tot_book_cnt;
671 	}
672 	tot_out = 0;
673 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
674 		tot_out += lnet->flight_size;
675 	}
676 	if (tot_out != stcb->asoc.total_flight) {
677 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
678 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
679 		sctp_audit_indx++;
680 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
681 			sctp_audit_indx = 0;
682 		}
683 		rep = 1;
684 		SCTP_PRINTF("real flight:%d net total was %d\n",
685 		    stcb->asoc.total_flight, tot_out);
686 		/* now corrective action */
687 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
688 
689 			tot_out = 0;
690 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
691 				if ((chk->whoTo == lnet) &&
692 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
693 					tot_out += chk->book_size;
694 				}
695 			}
696 			if (lnet->flight_size != tot_out) {
697 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
698 				    (uint32_t) lnet, lnet->flight_size,
699 				    tot_out);
700 				lnet->flight_size = tot_out;
701 			}
702 		}
703 	}
704 	if (rep) {
705 		sctp_print_audit_report();
706 	}
707 }
708 
709 void
710 sctp_audit_log(uint8_t ev, uint8_t fd)
711 {
712 
713 	sctp_audit_data[sctp_audit_indx][0] = ev;
714 	sctp_audit_data[sctp_audit_indx][1] = fd;
715 	sctp_audit_indx++;
716 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
717 		sctp_audit_indx = 0;
718 	}
719 }
720 
721 #endif
722 
723 /*
724  * a list of sizes based on typical mtu's, used only if next hop size not
725  * returned.
726  */
/*
 * Entries must remain sorted ascending: find_next_best_mtu() scans
 * for the first entry larger than a failed datagram size and steps
 * back one.  The array length must match NUMBER_OF_MTU_SIZES (18).
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
747 
748 void
749 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
750 {
751 	struct sctp_association *asoc;
752 	struct sctp_nets *net;
753 
754 	asoc = &stcb->asoc;
755 
756 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
757 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
758 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
759 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
760 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
761 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
762 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
763 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
764 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
765 	}
766 }
767 
768 int
769 find_next_best_mtu(int totsz)
770 {
771 	int i, perfer;
772 
773 	/*
774 	 * if we are in here we must find the next best fit based on the
775 	 * size of the dg that failed to be sent.
776 	 */
777 	perfer = 0;
778 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
779 		if (totsz < sctp_mtu_sizes[i]) {
780 			perfer = i - 1;
781 			if (perfer < 0)
782 				perfer = 0;
783 			break;
784 		}
785 	}
786 	return (sctp_mtu_sizes[perfer]);
787 }
788 
/*
 * Refill the endpoint's random store by HMAC'ing the stored random
 * numbers with a monotonically increasing counter, then reset the
 * consumption offset.  sctp_select_initial_TSN() draws 32-bit values
 * from the refilled store.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
807 
/*
 * Return a pseudo-random 32-bit value for use as an initial TSN (or
 * verification-tag candidate), drawn from the endpoint's random
 * store.  The store offset is advanced with a lock-free CAS retry
 * loop; when the offset wraps, the store is refilled.  If the
 * initial_sequence_debug sysctl knob is non-zero, a deterministic,
 * incrementing sequence is returned instead (debug aid).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	/* Reserve 4 bytes; wrap before running off the signature area. */
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* Claim [store_at, store_at+4) atomically; retry on a race. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/*
	 * NOTE(review): reads 4 bytes of the uint8_t store through a
	 * uint32_t pointer — relies on acceptable alignment/aliasing
	 * behavior of the kernel environment.
	 */
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
845 
846 uint32_t
847 sctp_select_a_tag(struct sctp_inpcb *inp)
848 {
849 	u_long x, not_done;
850 	struct timeval now;
851 
852 	(void)SCTP_GETTIME_TIMEVAL(&now);
853 	not_done = 1;
854 	while (not_done) {
855 		x = sctp_select_initial_TSN(&inp->sctp_ep);
856 		if (x == 0) {
857 			/* we never use 0 */
858 			continue;
859 		}
860 		if (sctp_is_vtag_good(inp, x, &now)) {
861 			not_done = 0;
862 		}
863 	}
864 	return (x);
865 }
866 
867 int
868 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
869     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
870 {
871 	struct sctp_association *asoc;
872 
873 	/*
874 	 * Anything set to zero is taken care of by the allocation routine's
875 	 * bzero
876 	 */
877 
878 	/*
879 	 * Up front select what scoping to apply on addresses I tell my peer
880 	 * Not sure what to do with these right now, we will need to come up
881 	 * with a way to set them. We may need to pass them through from the
882 	 * caller in the sctp_aloc_assoc() function.
883 	 */
884 	int i;
885 
886 	asoc = &stcb->asoc;
887 	/* init all variables to a known value. */
888 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
889 	asoc->max_burst = m->sctp_ep.max_burst;
890 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
891 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
892 	asoc->sctp_cmt_on_off = (uint8_t) sctp_cmt_on_off;
893 	/* JRS 5/21/07 - Init CMT PF variables */
894 	asoc->sctp_cmt_pf = (uint8_t) sctp_cmt_pf;
895 	asoc->sctp_frag_point = m->sctp_frag_point;
896 #ifdef INET
897 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
898 #else
899 	asoc->default_tos = 0;
900 #endif
901 
902 #ifdef INET6
903 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
904 #else
905 	asoc->default_flowlabel = 0;
906 #endif
907 	if (override_tag) {
908 		struct timeval now;
909 
910 		(void)SCTP_GETTIME_TIMEVAL(&now);
911 		if (sctp_is_vtag_good(m, override_tag, &now)) {
912 			asoc->my_vtag = override_tag;
913 		} else {
914 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
915 			return (ENOMEM);
916 		}
917 
918 	} else {
919 		asoc->my_vtag = sctp_select_a_tag(m);
920 	}
921 	/* Get the nonce tags */
922 	asoc->my_vtag_nonce = sctp_select_a_tag(m);
923 	asoc->peer_vtag_nonce = sctp_select_a_tag(m);
924 	asoc->vrf_id = vrf_id;
925 
926 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
927 		asoc->hb_is_disabled = 1;
928 	else
929 		asoc->hb_is_disabled = 0;
930 
931 #ifdef SCTP_ASOCLOG_OF_TSNS
932 	asoc->tsn_in_at = 0;
933 	asoc->tsn_out_at = 0;
934 	asoc->tsn_in_wrapped = 0;
935 	asoc->tsn_out_wrapped = 0;
936 	asoc->cumack_log_at = 0;
937 	asoc->cumack_log_atsnt = 0;
938 #endif
939 #ifdef SCTP_FS_SPEC_LOG
940 	asoc->fs_index = 0;
941 #endif
942 	asoc->refcnt = 0;
943 	asoc->assoc_up_sent = 0;
944 	asoc->assoc_id = asoc->my_vtag;
945 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
946 	    sctp_select_initial_TSN(&m->sctp_ep);
947 	/* we are optimisitic here */
948 	asoc->peer_supports_pktdrop = 1;
949 
950 	asoc->sent_queue_retran_cnt = 0;
951 
952 	/* for CMT */
953 	asoc->last_net_data_came_from = NULL;
954 
955 	/* This will need to be adjusted */
956 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
957 	asoc->last_acked_seq = asoc->init_seq_number - 1;
958 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
959 	asoc->asconf_seq_in = asoc->last_acked_seq;
960 
961 	/* here we are different, we hold the next one we expect */
962 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
963 
964 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
965 	asoc->initial_rto = m->sctp_ep.initial_rto;
966 
967 	asoc->max_init_times = m->sctp_ep.max_init_times;
968 	asoc->max_send_times = m->sctp_ep.max_send_times;
969 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
970 	asoc->free_chunk_cnt = 0;
971 
972 	asoc->iam_blocking = 0;
973 	/* ECN Nonce initialization */
974 	asoc->context = m->sctp_context;
975 	asoc->def_send = m->def_send;
976 	asoc->ecn_nonce_allowed = 0;
977 	asoc->receiver_nonce_sum = 1;
978 	asoc->nonce_sum_expect_base = 1;
979 	asoc->nonce_sum_check = 1;
980 	asoc->nonce_resync_tsn = 0;
981 	asoc->nonce_wait_for_ecne = 0;
982 	asoc->nonce_wait_tsn = 0;
983 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
984 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
985 	asoc->pr_sctp_cnt = 0;
986 	asoc->total_output_queue_size = 0;
987 
988 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
989 		struct in6pcb *inp6;
990 
991 		/* Its a V6 socket */
992 		inp6 = (struct in6pcb *)m;
993 		asoc->ipv6_addr_legal = 1;
994 		/* Now look at the binding flag to see if V4 will be legal */
995 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
996 			asoc->ipv4_addr_legal = 1;
997 		} else {
998 			/* V4 addresses are NOT legal on the association */
999 			asoc->ipv4_addr_legal = 0;
1000 		}
1001 	} else {
1002 		/* Its a V4 socket, no - V6 */
1003 		asoc->ipv4_addr_legal = 1;
1004 		asoc->ipv6_addr_legal = 0;
1005 	}
1006 
1007 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1008 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1009 
1010 	asoc->smallest_mtu = m->sctp_frag_point;
1011 #ifdef SCTP_PRINT_FOR_B_AND_M
1012 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1013 	    asoc->smallest_mtu);
1014 #endif
1015 	asoc->minrto = m->sctp_ep.sctp_minrto;
1016 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1017 
1018 	asoc->locked_on_sending = NULL;
1019 	asoc->stream_locked_on = 0;
1020 	asoc->ecn_echo_cnt_onq = 0;
1021 	asoc->stream_locked = 0;
1022 
1023 	asoc->send_sack = 1;
1024 
1025 	LIST_INIT(&asoc->sctp_restricted_addrs);
1026 
1027 	TAILQ_INIT(&asoc->nets);
1028 	TAILQ_INIT(&asoc->pending_reply_queue);
1029 	TAILQ_INIT(&asoc->asconf_ack_sent);
1030 	/* Setup to fill the hb random cache at first HB */
1031 	asoc->hb_random_idx = 4;
1032 
1033 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1034 
1035 	/*
1036 	 * JRS - Pick the default congestion control module based on the
1037 	 * sysctl.
1038 	 */
1039 	switch (m->sctp_ep.sctp_default_cc_module) {
1040 		/* JRS - Standard TCP congestion control */
1041 	case SCTP_CC_RFC2581:
1042 		{
1043 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1044 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1045 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1046 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1047 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1048 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1049 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1050 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1051 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1052 			break;
1053 		}
1054 		/* JRS - High Speed TCP congestion control (Floyd) */
1055 	case SCTP_CC_HSTCP:
1056 		{
1057 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1058 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1059 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1060 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1061 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1063 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1064 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1065 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1066 			break;
1067 		}
1068 		/* JRS - HTCP congestion control */
1069 	case SCTP_CC_HTCP:
1070 		{
1071 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1072 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1073 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1074 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1075 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1077 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1078 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1079 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1080 			break;
1081 		}
1082 		/* JRS - By default, use RFC2581 */
1083 	default:
1084 		{
1085 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1086 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1087 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1088 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1089 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1091 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1092 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1093 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1094 			break;
1095 		}
1096 	}
1097 
1098 	/*
1099 	 * Now the stream parameters, here we allocate space for all streams
1100 	 * that we request by default.
1101 	 */
1102 	asoc->streamoutcnt = asoc->pre_open_streams =
1103 	    m->sctp_ep.pre_open_stream_count;
1104 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1105 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1106 	    SCTP_M_STRMO);
1107 	if (asoc->strmout == NULL) {
1108 		/* big trouble no memory */
1109 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1110 		return (ENOMEM);
1111 	}
1112 	for (i = 0; i < asoc->streamoutcnt; i++) {
1113 		/*
1114 		 * inbound side must be set to 0xffff, also NOTE when we get
1115 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1116 		 * count (streamoutcnt) but first check if we sent to any of
1117 		 * the upper streams that were dropped (if some were). Those
1118 		 * that were dropped must be notified to the upper layer as
1119 		 * failed to send.
1120 		 */
1121 		asoc->strmout[i].next_sequence_sent = 0x0;
1122 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1123 		asoc->strmout[i].stream_no = i;
1124 		asoc->strmout[i].last_msg_incomplete = 0;
1125 		asoc->strmout[i].next_spoke.tqe_next = 0;
1126 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1127 	}
1128 	/* Now the mapping array */
1129 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1130 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1131 	    SCTP_M_MAP);
1132 	if (asoc->mapping_array == NULL) {
1133 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1134 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1135 		return (ENOMEM);
1136 	}
1137 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1138 	/* Now the init of the other outqueues */
1139 	TAILQ_INIT(&asoc->free_chunks);
1140 	TAILQ_INIT(&asoc->out_wheel);
1141 	TAILQ_INIT(&asoc->control_send_queue);
1142 	TAILQ_INIT(&asoc->send_queue);
1143 	TAILQ_INIT(&asoc->sent_queue);
1144 	TAILQ_INIT(&asoc->reasmqueue);
1145 	TAILQ_INIT(&asoc->resetHead);
1146 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1147 	TAILQ_INIT(&asoc->asconf_queue);
1148 	/* authentication fields */
1149 	asoc->authinfo.random = NULL;
1150 	asoc->authinfo.assoc_key = NULL;
1151 	asoc->authinfo.assoc_keyid = 0;
1152 	asoc->authinfo.recv_key = NULL;
1153 	asoc->authinfo.recv_keyid = 0;
1154 	LIST_INIT(&asoc->shared_keys);
1155 	asoc->marked_retrans = 0;
1156 	asoc->timoinit = 0;
1157 	asoc->timodata = 0;
1158 	asoc->timosack = 0;
1159 	asoc->timoshutdown = 0;
1160 	asoc->timoheartbeat = 0;
1161 	asoc->timocookie = 0;
1162 	asoc->timoshutdownack = 0;
1163 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1164 	asoc->discontinuity_time = asoc->start_time;
1165 	/*
1166 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1167 	 * freed later whe the association is freed.
1168 	 */
1169 	return (0);
1170 }
1171 
1172 int
1173 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1174 {
1175 	/* mapping array needs to grow */
1176 	uint8_t *new_array;
1177 	uint32_t new_size;
1178 
1179 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1180 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1181 	if (new_array == NULL) {
1182 		/* can't get more, forget it */
1183 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1184 		    new_size);
1185 		return (-1);
1186 	}
1187 	memset(new_array, 0, new_size);
1188 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1189 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1190 	asoc->mapping_array = new_array;
1191 	asoc->mapping_array_size = new_size;
1192 	return (0);
1193 }
1194 
1195 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Core of the thread-based PCB iterator: walk endpoints (and, per
 * endpoint, associations) that match the iterator's flag/feature/state
 * filters, invoking the registered callbacks.  The iterator struct is
 * freed here when the walk completes.  Lock order juggling below is
 * deliberate and fragile -- do not reorder.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	/* Drop the reference taken when this inp was handed to the iterator. */
	if (it->inp) {
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		/* Fire the completion callback, then the iterator itself dies. */
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/* Skip endpoints whose flags/features do not match the filter. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/*
	 * NOTE(review): write lock is swapped for a read lock with a gap in
	 * between; presumably safe because the ITERATOR lock is still held,
	 * but confirm nothing can free the inp in that window.
	 */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	/* Endpoint callback asked us to skip, or no associations at all. */
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin both the stcb (refcnt) and the inp (ref) so they
			 * survive while every lock is briefly released.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	/*
	 * NOTE(review): lock-then-unlock with no body acts as a barrier to
	 * drain any writer still holding the inp lock -- TODO confirm intent.
	 */
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1322 
1323 void
1324 sctp_iterator_worker(void)
1325 {
1326 	struct sctp_iterator *it = NULL;
1327 
1328 	/* This function is called with the WQ lock in place */
1329 
1330 	sctppcbinfo.iterator_running = 1;
1331 again:
1332 	it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
1333 	while (it) {
1334 		/* now lets work on this one */
1335 		TAILQ_REMOVE(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
1336 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1337 		sctp_iterator_work(it);
1338 		SCTP_IPI_ITERATOR_WQ_LOCK();
1339 		/* sa_ignore FREED_MEMORY */
1340 		it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
1341 	}
1342 	if (TAILQ_FIRST(&sctppcbinfo.iteratorhead)) {
1343 		goto again;
1344 	}
1345 	sctppcbinfo.iterator_running = 0;
1346 	return;
1347 }
1348 
1349 #endif
1350 
1351 
1352 static void
1353 sctp_handle_addr_wq(void)
1354 {
1355 	/* deal with the ADDR wq from the rtsock calls */
1356 	struct sctp_laddr *wi;
1357 	struct sctp_asconf_iterator *asc;
1358 
1359 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1360 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1361 	if (asc == NULL) {
1362 		/* Try later, no memory */
1363 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1364 		    (struct sctp_inpcb *)NULL,
1365 		    (struct sctp_tcb *)NULL,
1366 		    (struct sctp_nets *)NULL);
1367 		return;
1368 	}
1369 	LIST_INIT(&asc->list_of_work);
1370 	asc->cnt = 0;
1371 	SCTP_IPI_ITERATOR_WQ_LOCK();
1372 	wi = LIST_FIRST(&sctppcbinfo.addr_wq);
1373 	while (wi != NULL) {
1374 		LIST_REMOVE(wi, sctp_nxt_addr);
1375 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1376 		asc->cnt++;
1377 		wi = LIST_FIRST(&sctppcbinfo.addr_wq);
1378 	}
1379 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1380 	if (asc->cnt == 0) {
1381 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1382 	} else {
1383 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1384 		    sctp_asconf_iterator_stcb,
1385 		    NULL,	/* No ep end for boundall */
1386 		    SCTP_PCB_FLAGS_BOUNDALL,
1387 		    SCTP_PCB_ANY_FEATURES,
1388 		    SCTP_ASOC_ANY_STATE,
1389 		    (void *)asc, 0,
1390 		    sctp_asconf_iterator_end, NULL, 0);
1391 	}
1392 }
1393 
/*
 * Scratch results used by the SCTP_TIMER_TYPE_SEND case of
 * sctp_timeout_handler() below.
 * NOTE(review): these are non-static, file-scope globals written by every
 * T3 retransmit timeout -- concurrent timer callbacks could clobber each
 * other's values.  Presumably retained only for debugger visibility;
 * confirm before relying on them, and consider making them locals.
 */
int retcode = 0;
int cur_oerr = 0;
1396 
1397 void
1398 sctp_timeout_handler(void *t)
1399 {
1400 	struct sctp_inpcb *inp;
1401 	struct sctp_tcb *stcb;
1402 	struct sctp_nets *net;
1403 	struct sctp_timer *tmr;
1404 
1405 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1406 	struct socket *so;
1407 
1408 #endif
1409 	int did_output;
1410 	struct sctp_iterator *it = NULL;
1411 
1412 	tmr = (struct sctp_timer *)t;
1413 	inp = (struct sctp_inpcb *)tmr->ep;
1414 	stcb = (struct sctp_tcb *)tmr->tcb;
1415 	net = (struct sctp_nets *)tmr->net;
1416 	did_output = 1;
1417 
1418 #ifdef SCTP_AUDITING_ENABLED
1419 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1420 	sctp_auditing(3, inp, stcb, net);
1421 #endif
1422 
1423 	/* sanity checks... */
1424 	if (tmr->self != (void *)tmr) {
1425 		/*
1426 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1427 		 * tmr);
1428 		 */
1429 		return;
1430 	}
1431 	tmr->stopped_from = 0xa001;
1432 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1433 		/*
1434 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1435 		 * tmr->type);
1436 		 */
1437 		return;
1438 	}
1439 	tmr->stopped_from = 0xa002;
1440 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1441 		return;
1442 	}
1443 	/* if this is an iterator timeout, get the struct and clear inp */
1444 	tmr->stopped_from = 0xa003;
1445 	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
1446 		it = (struct sctp_iterator *)inp;
1447 		inp = NULL;
1448 	}
1449 	if (inp) {
1450 		SCTP_INP_INCR_REF(inp);
1451 		if ((inp->sctp_socket == 0) &&
1452 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1453 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1454 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1455 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1456 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1457 		    ) {
1458 			SCTP_INP_DECR_REF(inp);
1459 			return;
1460 		}
1461 	}
1462 	tmr->stopped_from = 0xa004;
1463 	if (stcb) {
1464 		atomic_add_int(&stcb->asoc.refcnt, 1);
1465 		if (stcb->asoc.state == 0) {
1466 			atomic_add_int(&stcb->asoc.refcnt, -1);
1467 			if (inp) {
1468 				SCTP_INP_DECR_REF(inp);
1469 			}
1470 			return;
1471 		}
1472 	}
1473 	tmr->stopped_from = 0xa005;
1474 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
1475 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1476 		if (inp) {
1477 			SCTP_INP_DECR_REF(inp);
1478 		}
1479 		if (stcb) {
1480 			atomic_add_int(&stcb->asoc.refcnt, -1);
1481 		}
1482 		return;
1483 	}
1484 	tmr->stopped_from = 0xa006;
1485 
1486 	if (stcb) {
1487 		SCTP_TCB_LOCK(stcb);
1488 		atomic_add_int(&stcb->asoc.refcnt, -1);
1489 		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
1490 		    ((stcb->asoc.state == 0) ||
1491 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1492 			SCTP_TCB_UNLOCK(stcb);
1493 			if (inp) {
1494 				SCTP_INP_DECR_REF(inp);
1495 			}
1496 			return;
1497 		}
1498 	}
1499 	/* record in stopped what t-o occured */
1500 	tmr->stopped_from = tmr->type;
1501 
1502 	/* mark as being serviced now */
1503 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1504 		/*
1505 		 * Callout has been rescheduled.
1506 		 */
1507 		goto get_out;
1508 	}
1509 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1510 		/*
1511 		 * Not active, so no action.
1512 		 */
1513 		goto get_out;
1514 	}
1515 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1516 
1517 	/* call the handler for the appropriate timer type */
1518 	switch (tmr->type) {
1519 	case SCTP_TIMER_TYPE_ZERO_COPY:
1520 		if (inp == NULL) {
1521 			break;
1522 		}
1523 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1524 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1525 		}
1526 		break;
1527 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1528 		if (inp == NULL) {
1529 			break;
1530 		}
1531 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1532 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1533 		}
1534 		break;
1535 	case SCTP_TIMER_TYPE_ADDR_WQ:
1536 		sctp_handle_addr_wq();
1537 		break;
1538 	case SCTP_TIMER_TYPE_ITERATOR:
1539 		SCTP_STAT_INCR(sctps_timoiterator);
1540 		sctp_iterator_timer(it);
1541 		break;
1542 	case SCTP_TIMER_TYPE_SEND:
1543 		if ((stcb == NULL) || (inp == NULL)) {
1544 			break;
1545 		}
1546 		SCTP_STAT_INCR(sctps_timodata);
1547 		stcb->asoc.timodata++;
1548 		stcb->asoc.num_send_timers_up--;
1549 		if (stcb->asoc.num_send_timers_up < 0) {
1550 			stcb->asoc.num_send_timers_up = 0;
1551 		}
1552 		SCTP_TCB_LOCK_ASSERT(stcb);
1553 		cur_oerr = stcb->asoc.overall_error_count;
1554 		retcode = sctp_t3rxt_timer(inp, stcb, net);
1555 		if (retcode) {
1556 			/* no need to unlock on tcb its gone */
1557 
1558 			goto out_decr;
1559 		}
1560 		SCTP_TCB_LOCK_ASSERT(stcb);
1561 #ifdef SCTP_AUDITING_ENABLED
1562 		sctp_auditing(4, inp, stcb, net);
1563 #endif
1564 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1565 		if ((stcb->asoc.num_send_timers_up == 0) &&
1566 		    (stcb->asoc.sent_queue_cnt > 0)
1567 		    ) {
1568 			struct sctp_tmit_chunk *chk;
1569 
1570 			/*
1571 			 * safeguard. If there on some on the sent queue
1572 			 * somewhere but no timers running something is
1573 			 * wrong... so we start a timer on the first chunk
1574 			 * on the send queue on whatever net it is sent to.
1575 			 */
1576 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1577 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1578 			    chk->whoTo);
1579 		}
1580 		break;
1581 	case SCTP_TIMER_TYPE_INIT:
1582 		if ((stcb == NULL) || (inp == NULL)) {
1583 			break;
1584 		}
1585 		SCTP_STAT_INCR(sctps_timoinit);
1586 		stcb->asoc.timoinit++;
1587 		if (sctp_t1init_timer(inp, stcb, net)) {
1588 			/* no need to unlock on tcb its gone */
1589 			goto out_decr;
1590 		}
1591 		/* We do output but not here */
1592 		did_output = 0;
1593 		break;
1594 	case SCTP_TIMER_TYPE_RECV:
1595 		if ((stcb == NULL) || (inp == NULL)) {
1596 			break;
1597 		} {
1598 			int abort_flag;
1599 
1600 			SCTP_STAT_INCR(sctps_timosack);
1601 			stcb->asoc.timosack++;
1602 			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
1603 				sctp_sack_check(stcb, 0, 0, &abort_flag);
1604 			sctp_send_sack(stcb);
1605 		}
1606 #ifdef SCTP_AUDITING_ENABLED
1607 		sctp_auditing(4, inp, stcb, net);
1608 #endif
1609 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1610 		break;
1611 	case SCTP_TIMER_TYPE_SHUTDOWN:
1612 		if ((stcb == NULL) || (inp == NULL)) {
1613 			break;
1614 		}
1615 		if (sctp_shutdown_timer(inp, stcb, net)) {
1616 			/* no need to unlock on tcb its gone */
1617 			goto out_decr;
1618 		}
1619 		SCTP_STAT_INCR(sctps_timoshutdown);
1620 		stcb->asoc.timoshutdown++;
1621 #ifdef SCTP_AUDITING_ENABLED
1622 		sctp_auditing(4, inp, stcb, net);
1623 #endif
1624 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1625 		break;
1626 	case SCTP_TIMER_TYPE_HEARTBEAT:
1627 		{
1628 			struct sctp_nets *lnet;
1629 			int cnt_of_unconf = 0;
1630 
1631 			if ((stcb == NULL) || (inp == NULL)) {
1632 				break;
1633 			}
1634 			SCTP_STAT_INCR(sctps_timoheartbeat);
1635 			stcb->asoc.timoheartbeat++;
1636 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1637 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1638 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1639 					cnt_of_unconf++;
1640 				}
1641 			}
1642 			if (cnt_of_unconf == 0) {
1643 				if (sctp_heartbeat_timer(inp, stcb, lnet,
1644 				    cnt_of_unconf)) {
1645 					/* no need to unlock on tcb its gone */
1646 					goto out_decr;
1647 				}
1648 			}
1649 #ifdef SCTP_AUDITING_ENABLED
1650 			sctp_auditing(4, inp, stcb, lnet);
1651 #endif
1652 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
1653 			    stcb->sctp_ep, stcb, lnet);
1654 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1655 		}
1656 		break;
1657 	case SCTP_TIMER_TYPE_COOKIE:
1658 		if ((stcb == NULL) || (inp == NULL)) {
1659 			break;
1660 		}
1661 		if (sctp_cookie_timer(inp, stcb, net)) {
1662 			/* no need to unlock on tcb its gone */
1663 			goto out_decr;
1664 		}
1665 		SCTP_STAT_INCR(sctps_timocookie);
1666 		stcb->asoc.timocookie++;
1667 #ifdef SCTP_AUDITING_ENABLED
1668 		sctp_auditing(4, inp, stcb, net);
1669 #endif
1670 		/*
1671 		 * We consider T3 and Cookie timer pretty much the same with
1672 		 * respect to where from in chunk_output.
1673 		 */
1674 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1675 		break;
1676 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1677 		{
1678 			struct timeval tv;
1679 			int i, secret;
1680 
1681 			if (inp == NULL) {
1682 				break;
1683 			}
1684 			SCTP_STAT_INCR(sctps_timosecret);
1685 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1686 			SCTP_INP_WLOCK(inp);
1687 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1688 			inp->sctp_ep.last_secret_number =
1689 			    inp->sctp_ep.current_secret_number;
1690 			inp->sctp_ep.current_secret_number++;
1691 			if (inp->sctp_ep.current_secret_number >=
1692 			    SCTP_HOW_MANY_SECRETS) {
1693 				inp->sctp_ep.current_secret_number = 0;
1694 			}
1695 			secret = (int)inp->sctp_ep.current_secret_number;
1696 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1697 				inp->sctp_ep.secret_key[secret][i] =
1698 				    sctp_select_initial_TSN(&inp->sctp_ep);
1699 			}
1700 			SCTP_INP_WUNLOCK(inp);
1701 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1702 		}
1703 		did_output = 0;
1704 		break;
1705 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1706 		if ((stcb == NULL) || (inp == NULL)) {
1707 			break;
1708 		}
1709 		SCTP_STAT_INCR(sctps_timopathmtu);
1710 		sctp_pathmtu_timer(inp, stcb, net);
1711 		did_output = 0;
1712 		break;
1713 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1714 		if ((stcb == NULL) || (inp == NULL)) {
1715 			break;
1716 		}
1717 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1718 			/* no need to unlock on tcb its gone */
1719 			goto out_decr;
1720 		}
1721 		SCTP_STAT_INCR(sctps_timoshutdownack);
1722 		stcb->asoc.timoshutdownack++;
1723 #ifdef SCTP_AUDITING_ENABLED
1724 		sctp_auditing(4, inp, stcb, net);
1725 #endif
1726 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1727 		break;
1728 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1729 		if ((stcb == NULL) || (inp == NULL)) {
1730 			break;
1731 		}
1732 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1733 		sctp_abort_an_association(inp, stcb,
1734 		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
1735 		/* no need to unlock on tcb its gone */
1736 		goto out_decr;
1737 
1738 	case SCTP_TIMER_TYPE_STRRESET:
1739 		if ((stcb == NULL) || (inp == NULL)) {
1740 			break;
1741 		}
1742 		if (sctp_strreset_timer(inp, stcb, net)) {
1743 			/* no need to unlock on tcb its gone */
1744 			goto out_decr;
1745 		}
1746 		SCTP_STAT_INCR(sctps_timostrmrst);
1747 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1748 		break;
1749 	case SCTP_TIMER_TYPE_EARLYFR:
1750 		/* Need to do FR of things for net */
1751 		if ((stcb == NULL) || (inp == NULL)) {
1752 			break;
1753 		}
1754 		SCTP_STAT_INCR(sctps_timoearlyfr);
1755 		sctp_early_fr_timer(inp, stcb, net);
1756 		break;
1757 	case SCTP_TIMER_TYPE_ASCONF:
1758 		if ((stcb == NULL) || (inp == NULL)) {
1759 			break;
1760 		}
1761 		if (sctp_asconf_timer(inp, stcb, net)) {
1762 			/* no need to unlock on tcb its gone */
1763 			goto out_decr;
1764 		}
1765 		SCTP_STAT_INCR(sctps_timoasconf);
1766 #ifdef SCTP_AUDITING_ENABLED
1767 		sctp_auditing(4, inp, stcb, net);
1768 #endif
1769 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1770 		break;
1771 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1772 		if ((stcb == NULL) || (inp == NULL)) {
1773 			break;
1774 		}
1775 		sctp_delete_prim_timer(inp, stcb, net);
1776 		SCTP_STAT_INCR(sctps_timodelprim);
1777 		break;
1778 
1779 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1780 		if ((stcb == NULL) || (inp == NULL)) {
1781 			break;
1782 		}
1783 		SCTP_STAT_INCR(sctps_timoautoclose);
1784 		sctp_autoclose_timer(inp, stcb, net);
1785 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1786 		did_output = 0;
1787 		break;
1788 	case SCTP_TIMER_TYPE_ASOCKILL:
1789 		if ((stcb == NULL) || (inp == NULL)) {
1790 			break;
1791 		}
1792 		SCTP_STAT_INCR(sctps_timoassockill);
1793 		/* Can we free it yet? */
1794 		SCTP_INP_DECR_REF(inp);
1795 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1796 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1797 		so = SCTP_INP_SO(inp);
1798 		atomic_add_int(&stcb->asoc.refcnt, 1);
1799 		SCTP_TCB_UNLOCK(stcb);
1800 		SCTP_SOCKET_LOCK(so, 1);
1801 		SCTP_TCB_LOCK(stcb);
1802 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1803 #endif
1804 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1805 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1806 		SCTP_SOCKET_UNLOCK(so, 1);
1807 #endif
1808 		/*
1809 		 * free asoc, always unlocks (or destroy's) so prevent
1810 		 * duplicate unlock or unlock of a free mtx :-0
1811 		 */
1812 		stcb = NULL;
1813 		goto out_no_decr;
1814 	case SCTP_TIMER_TYPE_INPKILL:
1815 		SCTP_STAT_INCR(sctps_timoinpkill);
1816 		if (inp == NULL) {
1817 			break;
1818 		}
1819 		/*
1820 		 * special case, take away our increment since WE are the
1821 		 * killer
1822 		 */
1823 		SCTP_INP_DECR_REF(inp);
1824 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1825 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1826 		    SCTP_CALLED_DIRECTLY_NOCMPSET);
1827 		goto out_no_decr;
1828 	default:
1829 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1830 		    tmr->type);
1831 		break;
1832 	};
1833 #ifdef SCTP_AUDITING_ENABLED
1834 	sctp_audit_log(0xF1, (uint8_t) tmr->type);
1835 	if (inp)
1836 		sctp_auditing(5, inp, stcb, net);
1837 #endif
1838 	if ((did_output) && stcb) {
1839 		/*
1840 		 * Now we need to clean up the control chunk chain if an
1841 		 * ECNE is on it. It must be marked as UNSENT again so next
1842 		 * call will continue to send it until such time that we get
1843 		 * a CWR, to remove it. It is, however, less likely that we
1844 		 * will find a ecn echo on the chain though.
1845 		 */
1846 		sctp_fix_ecn_echo(&stcb->asoc);
1847 	}
1848 get_out:
1849 	if (stcb) {
1850 		SCTP_TCB_UNLOCK(stcb);
1851 	}
1852 out_decr:
1853 	if (inp) {
1854 		SCTP_INP_DECR_REF(inp);
1855 	}
1856 out_no_decr:
1857 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
1858 	    tmr->type);
1859 	if (inp) {
1860 	}
1861 }
1862 
1863 void
1864 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1865     struct sctp_nets *net)
1866 {
1867 	int to_ticks;
1868 	struct sctp_timer *tmr;
1869 
1870 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1871 		return;
1872 
1873 	to_ticks = 0;
1874 
1875 	tmr = NULL;
1876 	if (stcb) {
1877 		SCTP_TCB_LOCK_ASSERT(stcb);
1878 	}
1879 	switch (t_type) {
1880 	case SCTP_TIMER_TYPE_ZERO_COPY:
1881 		tmr = &inp->sctp_ep.zero_copy_timer;
1882 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1883 		break;
1884 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1885 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1886 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1887 		break;
1888 	case SCTP_TIMER_TYPE_ADDR_WQ:
1889 		/* Only 1 tick away :-) */
1890 		tmr = &sctppcbinfo.addr_wq_timer;
1891 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1892 		break;
1893 	case SCTP_TIMER_TYPE_ITERATOR:
1894 		{
1895 			struct sctp_iterator *it;
1896 
1897 			it = (struct sctp_iterator *)inp;
1898 			tmr = &it->tmr;
1899 			to_ticks = SCTP_ITERATOR_TICKS;
1900 		}
1901 		break;
1902 	case SCTP_TIMER_TYPE_SEND:
1903 		/* Here we use the RTO timer */
1904 		{
1905 			int rto_val;
1906 
1907 			if ((stcb == NULL) || (net == NULL)) {
1908 				return;
1909 			}
1910 			tmr = &net->rxt_timer;
1911 			if (net->RTO == 0) {
1912 				rto_val = stcb->asoc.initial_rto;
1913 			} else {
1914 				rto_val = net->RTO;
1915 			}
1916 			to_ticks = MSEC_TO_TICKS(rto_val);
1917 		}
1918 		break;
1919 	case SCTP_TIMER_TYPE_INIT:
1920 		/*
1921 		 * Here we use the INIT timer default usually about 1
1922 		 * minute.
1923 		 */
1924 		if ((stcb == NULL) || (net == NULL)) {
1925 			return;
1926 		}
1927 		tmr = &net->rxt_timer;
1928 		if (net->RTO == 0) {
1929 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1930 		} else {
1931 			to_ticks = MSEC_TO_TICKS(net->RTO);
1932 		}
1933 		break;
1934 	case SCTP_TIMER_TYPE_RECV:
1935 		/*
1936 		 * Here we use the Delayed-Ack timer value from the inp
1937 		 * ususually about 200ms.
1938 		 */
1939 		if (stcb == NULL) {
1940 			return;
1941 		}
1942 		tmr = &stcb->asoc.dack_timer;
1943 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1944 		break;
1945 	case SCTP_TIMER_TYPE_SHUTDOWN:
1946 		/* Here we use the RTO of the destination. */
1947 		if ((stcb == NULL) || (net == NULL)) {
1948 			return;
1949 		}
1950 		if (net->RTO == 0) {
1951 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1952 		} else {
1953 			to_ticks = MSEC_TO_TICKS(net->RTO);
1954 		}
1955 		tmr = &net->rxt_timer;
1956 		break;
1957 	case SCTP_TIMER_TYPE_HEARTBEAT:
1958 		/*
1959 		 * the net is used here so that we can add in the RTO. Even
1960 		 * though we use a different timer. We also add the HB timer
1961 		 * PLUS a random jitter.
1962 		 */
1963 		if ((inp == NULL) || (stcb == NULL)) {
1964 			return;
1965 		} else {
1966 			uint32_t rndval;
1967 			uint8_t this_random;
1968 			int cnt_of_unconf = 0;
1969 			struct sctp_nets *lnet;
1970 
1971 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1972 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1973 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1974 					cnt_of_unconf++;
1975 				}
1976 			}
1977 			if (cnt_of_unconf) {
1978 				net = lnet = NULL;
1979 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
1980 			}
1981 			if (stcb->asoc.hb_random_idx > 3) {
1982 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1983 				memcpy(stcb->asoc.hb_random_values, &rndval,
1984 				    sizeof(stcb->asoc.hb_random_values));
1985 				stcb->asoc.hb_random_idx = 0;
1986 			}
1987 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
1988 			stcb->asoc.hb_random_idx++;
1989 			stcb->asoc.hb_ect_randombit = 0;
1990 			/*
1991 			 * this_random will be 0 - 256 ms RTO is in ms.
1992 			 */
1993 			if ((stcb->asoc.hb_is_disabled) &&
1994 			    (cnt_of_unconf == 0)) {
1995 				return;
1996 			}
1997 			if (net) {
1998 				int delay;
1999 
2000 				delay = stcb->asoc.heart_beat_delay;
2001 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2002 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2003 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2004 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2005 						delay = 0;
2006 					}
2007 				}
2008 				if (net->RTO == 0) {
2009 					/* Never been checked */
2010 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2011 				} else {
2012 					/* set rto_val to the ms */
2013 					to_ticks = delay + net->RTO + this_random;
2014 				}
2015 			} else {
2016 				if (cnt_of_unconf) {
2017 					to_ticks = this_random + stcb->asoc.initial_rto;
2018 				} else {
2019 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2020 				}
2021 			}
2022 			/*
2023 			 * Now we must convert the to_ticks that are now in
2024 			 * ms to ticks.
2025 			 */
2026 			to_ticks = MSEC_TO_TICKS(to_ticks);
2027 			tmr = &stcb->asoc.hb_timer;
2028 		}
2029 		break;
2030 	case SCTP_TIMER_TYPE_COOKIE:
2031 		/*
2032 		 * Here we can use the RTO timer from the network since one
2033 		 * RTT was compelete. If a retran happened then we will be
2034 		 * using the RTO initial value.
2035 		 */
2036 		if ((stcb == NULL) || (net == NULL)) {
2037 			return;
2038 		}
2039 		if (net->RTO == 0) {
2040 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2041 		} else {
2042 			to_ticks = MSEC_TO_TICKS(net->RTO);
2043 		}
2044 		tmr = &net->rxt_timer;
2045 		break;
2046 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2047 		/*
2048 		 * nothing needed but the endpoint here ususually about 60
2049 		 * minutes.
2050 		 */
2051 		if (inp == NULL) {
2052 			return;
2053 		}
2054 		tmr = &inp->sctp_ep.signature_change;
2055 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2056 		break;
2057 	case SCTP_TIMER_TYPE_ASOCKILL:
2058 		if (stcb == NULL) {
2059 			return;
2060 		}
2061 		tmr = &stcb->asoc.strreset_timer;
2062 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2063 		break;
2064 	case SCTP_TIMER_TYPE_INPKILL:
2065 		/*
2066 		 * The inp is setup to die. We re-use the signature_chage
2067 		 * timer since that has stopped and we are in the GONE
2068 		 * state.
2069 		 */
2070 		if (inp == NULL) {
2071 			return;
2072 		}
2073 		tmr = &inp->sctp_ep.signature_change;
2074 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2075 		break;
2076 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2077 		/*
2078 		 * Here we use the value found in the EP for PMTU ususually
2079 		 * about 10 minutes.
2080 		 */
2081 		if ((stcb == NULL) || (inp == NULL)) {
2082 			return;
2083 		}
2084 		if (net == NULL) {
2085 			return;
2086 		}
2087 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2088 		tmr = &net->pmtu_timer;
2089 		break;
2090 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2091 		/* Here we use the RTO of the destination */
2092 		if ((stcb == NULL) || (net == NULL)) {
2093 			return;
2094 		}
2095 		if (net->RTO == 0) {
2096 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2097 		} else {
2098 			to_ticks = MSEC_TO_TICKS(net->RTO);
2099 		}
2100 		tmr = &net->rxt_timer;
2101 		break;
2102 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2103 		/*
2104 		 * Here we use the endpoints shutdown guard timer usually
2105 		 * about 3 minutes.
2106 		 */
2107 		if ((inp == NULL) || (stcb == NULL)) {
2108 			return;
2109 		}
2110 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2111 		tmr = &stcb->asoc.shut_guard_timer;
2112 		break;
2113 	case SCTP_TIMER_TYPE_STRRESET:
2114 		/*
2115 		 * Here the timer comes from the stcb but its value is from
2116 		 * the net's RTO.
2117 		 */
2118 		if ((stcb == NULL) || (net == NULL)) {
2119 			return;
2120 		}
2121 		if (net->RTO == 0) {
2122 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2123 		} else {
2124 			to_ticks = MSEC_TO_TICKS(net->RTO);
2125 		}
2126 		tmr = &stcb->asoc.strreset_timer;
2127 		break;
2128 
2129 	case SCTP_TIMER_TYPE_EARLYFR:
2130 		{
2131 			unsigned int msec;
2132 
2133 			if ((stcb == NULL) || (net == NULL)) {
2134 				return;
2135 			}
2136 			if (net->flight_size > net->cwnd) {
2137 				/* no need to start */
2138 				return;
2139 			}
2140 			SCTP_STAT_INCR(sctps_earlyfrstart);
2141 			if (net->lastsa == 0) {
2142 				/* Hmm no rtt estimate yet? */
2143 				msec = stcb->asoc.initial_rto >> 2;
2144 			} else {
2145 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2146 			}
2147 			if (msec < sctp_early_fr_msec) {
2148 				msec = sctp_early_fr_msec;
2149 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2150 					msec = SCTP_MINFR_MSEC_FLOOR;
2151 				}
2152 			}
2153 			to_ticks = MSEC_TO_TICKS(msec);
2154 			tmr = &net->fr_timer;
2155 		}
2156 		break;
2157 	case SCTP_TIMER_TYPE_ASCONF:
2158 		/*
2159 		 * Here the timer comes from the stcb but its value is from
2160 		 * the net's RTO.
2161 		 */
2162 		if ((stcb == NULL) || (net == NULL)) {
2163 			return;
2164 		}
2165 		if (net->RTO == 0) {
2166 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2167 		} else {
2168 			to_ticks = MSEC_TO_TICKS(net->RTO);
2169 		}
2170 		tmr = &stcb->asoc.asconf_timer;
2171 		break;
2172 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2173 		if ((stcb == NULL) || (net != NULL)) {
2174 			return;
2175 		}
2176 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2177 		tmr = &stcb->asoc.delete_prim_timer;
2178 		break;
2179 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2180 		if (stcb == NULL) {
2181 			return;
2182 		}
2183 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2184 			/*
2185 			 * Really an error since stcb is NOT set to
2186 			 * autoclose
2187 			 */
2188 			return;
2189 		}
2190 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2191 		tmr = &stcb->asoc.autoclose_timer;
2192 		break;
2193 	default:
2194 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2195 		    __FUNCTION__, t_type);
2196 		return;
2197 		break;
2198 	};
2199 	if ((to_ticks <= 0) || (tmr == NULL)) {
2200 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2201 		    __FUNCTION__, t_type, to_ticks, tmr);
2202 		return;
2203 	}
2204 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2205 		/*
2206 		 * we do NOT allow you to have it already running. if it is
2207 		 * we leave the current one up unchanged
2208 		 */
2209 		return;
2210 	}
2211 	/* At this point we can proceed */
2212 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2213 		stcb->asoc.num_send_timers_up++;
2214 	}
2215 	tmr->stopped_from = 0;
2216 	tmr->type = t_type;
2217 	tmr->ep = (void *)inp;
2218 	tmr->tcb = (void *)stcb;
2219 	tmr->net = (void *)net;
2220 	tmr->self = (void *)tmr;
2221 	tmr->ticks = sctp_get_tick_count();
2222 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2223 	return;
2224 }
2225 
/*
 * Stop (cancel) a pending SCTP timer of type 't_type'.  Which object owns
 * the timer (endpoint, association, or destination net) depends on the
 * type, so the corresponding argument(s) must be non-NULL; calls missing
 * the needed object are silently ignored.  'from' records the caller's
 * identity in tmr->stopped_from for post-mortem debugging.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may be stopped without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Locate the sctp_timer structure that backs this timer type. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* global (per-stack) timer, not tied to any endpoint */
		tmr = &sctppcbinfo.addr_wq_timer;
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			struct sctp_iterator *it;

			/* the iterator is passed in place of the inp */
			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer (shares the strreset_timer
		 * slot on the association).
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* keep the association's count of pending SEND timers accurate */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2397 
#ifdef SCTP_USE_ADLER32
/*
 * Extend an Adler-32 checksum over buf[0..len-1] and return the updated
 * value ((s2 << 16) | s1).  Pass the previous return value (or the
 * Adler-32 seed) as 'adler' to checksum data incrementally.
 */
static uint32_t
update_adler32(uint32_t adler, uint8_t * buf, int32_t len)
{
	uint32_t lo = adler & 0xffff;	/* s1: running byte sum */
	uint32_t hi = (adler >> 16) & 0xffff;	/* s2: sum of s1 values */
	int32_t i;

	for (i = 0; i < len; i++) {
		/*
		 * Reduce mod SCTP_ADLER32_BASE with a single conditional
		 * subtract instead of a divide: each step adds at most 255
		 * to 'lo' and at most BASE-1 to 'hi', so one subtraction
		 * is always enough.  This is cheaper than '%' on most
		 * machines.
		 */
		lo = lo + buf[i];
		if (lo >= SCTP_ADLER32_BASE) {
			lo -= SCTP_ADLER32_BASE;
		}
		hi = hi + lo;
		if (hi >= SCTP_ADLER32_BASE) {
			hi -= SCTP_ADLER32_BASE;
		}
	}
	/* Return the adler32 of the bytes buf[0..len-1] */
	return ((hi << 16) + lo);
}

#endif
2438 
2439 
2440 uint32_t
2441 sctp_calculate_len(struct mbuf *m)
2442 {
2443 	uint32_t tlen = 0;
2444 	struct mbuf *at;
2445 
2446 	at = m;
2447 	while (at) {
2448 		tlen += SCTP_BUF_LEN(at);
2449 		at = SCTP_BUF_NEXT(at);
2450 	}
2451 	return (tlen);
2452 }
2453 
2454 #if defined(SCTP_WITH_NO_CSUM)
2455 
2456 uint32_t
2457 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2458 {
2459 	/*
2460 	 * given a mbuf chain with a packetheader offset by 'offset'
2461 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2462 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2463 	 * has a side bonus as it will calculate the total length of the
2464 	 * mbuf chain. Note: if offset is greater than the total mbuf
2465 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2466 	 */
2467 	if (pktlen == NULL)
2468 		return (0);
2469 	*pktlen = sctp_calculate_len(m);
2470 	return (0);
2471 }
2472 
2473 #elif defined(SCTP_USE_INCHKSUM)
2474 
2475 #include <machine/in_cksum.h>
2476 
2477 uint32_t
2478 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2479 {
2480 	/*
2481 	 * given a mbuf chain with a packetheader offset by 'offset'
2482 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2483 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2484 	 * has a side bonus as it will calculate the total length of the
2485 	 * mbuf chain. Note: if offset is greater than the total mbuf
2486 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2487 	 */
2488 	int32_t tlen = 0;
2489 	struct mbuf *at;
2490 	uint32_t the_sum, retsum;
2491 
2492 	at = m;
2493 	while (at) {
2494 		tlen += SCTP_BUF_LEN(at);
2495 		at = SCTP_BUF_NEXT(at);
2496 	}
2497 	the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset));
2498 	if (pktlen != NULL)
2499 		*pktlen = (tlen - offset);
2500 	retsum = htons(the_sum);
2501 	return (the_sum);
2502 }
2503 
#else

/*
 * Compute the SCTP checksum (CRC-32c, or Adler-32 when SCTP_USE_ADLER32)
 * over the mbuf chain starting 'offset' bytes in (i.e. past the IP
 * header, at the sctphdr whose csum field the caller has zeroed).  Also
 * reports the number of bytes checksummed through *pktlen.
 */
uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through the chain
	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
	 * has a side bonus as it will calculate the total length of the
	 * mbuf chain. Note: if offset is greater than the total mbuf
	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
	 */
	int32_t tlen = 0;	/* bytes folded into the checksum so far */

#ifdef SCTP_USE_ADLER32
	uint32_t base = 1L;	/* Adler-32 initial value */

#else
	uint32_t base = 0xffffffff;	/* CRC-32c initial value */

#endif
	struct mbuf *at;

	at = m;
	/* find the correct mbuf and offset into mbuf */
	while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) {
		offset -= SCTP_BUF_LEN(at);	/* update remaining offset
						 * left */
		at = SCTP_BUF_NEXT(at);
	}
	/* fold each mbuf's data (past 'offset' in the first) into 'base' */
	while (at != NULL) {
		if ((SCTP_BUF_LEN(at) - offset) > 0) {
#ifdef SCTP_USE_ADLER32
			base = update_adler32(base,
			    (unsigned char *)(SCTP_BUF_AT(at, offset)),
			    (unsigned int)(SCTP_BUF_LEN(at) - offset));
#else
			if ((SCTP_BUF_LEN(at) - offset) < 4) {
				/* Use old method if less than 4 bytes */
				base = old_update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			} else {
				base = update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			}
#endif
			tlen += SCTP_BUF_LEN(at) - offset;
			/* we only offset once into the first mbuf */
		}
		/* consume the remaining offset; it never spans past the
		 * first mbuf that actually holds data */
		if (offset) {
			if (offset < (uint32_t) SCTP_BUF_LEN(at))
				offset = 0;
			else
				offset -= SCTP_BUF_LEN(at);
		}
		at = SCTP_BUF_NEXT(at);
	}
	if (pktlen != NULL) {
		*pktlen = tlen;
	}
#ifdef SCTP_USE_ADLER32
	/* Adler32: convert to network byte order before returning */
	base = htonl(base);
#else
	/* CRC-32c: apply the final fix-ups (see sctp_csum_finalize) */
	base = sctp_csum_finalize(base);
#endif
	return (base);
}


#endif
2579 
2580 void
2581 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2582     struct sctp_association *asoc, uint32_t mtu)
2583 {
2584 	/*
2585 	 * Reset the P-MTU size on this association, this involves changing
2586 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2587 	 * allow the DF flag to be cleared.
2588 	 */
2589 	struct sctp_tmit_chunk *chk;
2590 	unsigned int eff_mtu, ovh;
2591 
2592 #ifdef SCTP_PRINT_FOR_B_AND_M
2593 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2594 	    inp, asoc, mtu);
2595 #endif
2596 	asoc->smallest_mtu = mtu;
2597 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2598 		ovh = SCTP_MIN_OVERHEAD;
2599 	} else {
2600 		ovh = SCTP_MIN_V4_OVERHEAD;
2601 	}
2602 	eff_mtu = mtu - ovh;
2603 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2604 
2605 		if (chk->send_size > eff_mtu) {
2606 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2607 		}
2608 	}
2609 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2610 		if (chk->send_size > eff_mtu) {
2611 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2612 		}
2613 	}
2614 }
2615 
2616 
2617 /*
2618  * given an association and starting time of the current RTT period return
2619  * RTO in number of msecs net should point to the current network
2620  */
2621 uint32_t
2622 sctp_calculate_rto(struct sctp_tcb *stcb,
2623     struct sctp_association *asoc,
2624     struct sctp_nets *net,
2625     struct timeval *told,
2626     int safe)
2627 {
2628 	/*-
2629 	 * given an association and the starting time of the current RTT
2630 	 * period (in value1/value2) return RTO in number of msecs.
2631 	 */
2632 	int calc_time = 0;
2633 	int o_calctime;
2634 	uint32_t new_rto = 0;
2635 	int first_measure = 0;
2636 	struct timeval now, then, *old;
2637 
2638 	/* Copy it out for sparc64 */
2639 	if (safe == sctp_align_unsafe_makecopy) {
2640 		old = &then;
2641 		memcpy(&then, told, sizeof(struct timeval));
2642 	} else if (safe == sctp_align_safe_nocopy) {
2643 		old = told;
2644 	} else {
2645 		/* error */
2646 		SCTP_PRINTF("Huh, bad rto calc call\n");
2647 		return (0);
2648 	}
2649 	/************************/
2650 	/* 1. calculate new RTT */
2651 	/************************/
2652 	/* get the current time */
2653 	(void)SCTP_GETTIME_TIMEVAL(&now);
2654 	/* compute the RTT value */
2655 	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
2656 		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
2657 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2658 			calc_time += (((u_long)now.tv_usec -
2659 			    (u_long)old->tv_usec) / 1000);
2660 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2661 			/* Borrow 1,000ms from current calculation */
2662 			calc_time -= 1000;
2663 			/* Add in the slop over */
2664 			calc_time += ((int)now.tv_usec / 1000);
2665 			/* Add in the pre-second ms's */
2666 			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
2667 		}
2668 	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
2669 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2670 			calc_time = ((u_long)now.tv_usec -
2671 			    (u_long)old->tv_usec) / 1000;
2672 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2673 			/* impossible .. garbage in nothing out */
2674 			goto calc_rto;
2675 		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
2676 			/*
2677 			 * We have to have 1 usec :-D this must be the
2678 			 * loopback.
2679 			 */
2680 			calc_time = 1;
2681 		} else {
2682 			/* impossible .. garbage in nothing out */
2683 			goto calc_rto;
2684 		}
2685 	} else {
2686 		/* Clock wrapped? */
2687 		goto calc_rto;
2688 	}
2689 	/***************************/
2690 	/* 2. update RTTVAR & SRTT */
2691 	/***************************/
2692 	o_calctime = calc_time;
2693 	/* this is Van Jacobson's integer version */
2694 	if (net->RTO_measured) {
2695 		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
2696 								 * shift=3 */
2697 		if (sctp_logging_level & SCTP_RTTVAR_LOGGING_ENABLE) {
2698 			rto_logging(net, SCTP_LOG_RTTVAR);
2699 		}
2700 		net->prev_rtt = o_calctime;
2701 		net->lastsa += calc_time;	/* add 7/8th into sa when
2702 						 * shift=3 */
2703 		if (calc_time < 0) {
2704 			calc_time = -calc_time;
2705 		}
2706 		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
2707 									 * VAR shift=2 */
2708 		net->lastsv += calc_time;
2709 		if (net->lastsv == 0) {
2710 			net->lastsv = SCTP_CLOCK_GRANULARITY;
2711 		}
2712 	} else {
2713 		/* First RTO measurment */
2714 		net->RTO_measured = 1;
2715 		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
2716 								 * shift=3 */
2717 		net->lastsv = calc_time;
2718 		if (net->lastsv == 0) {
2719 			net->lastsv = SCTP_CLOCK_GRANULARITY;
2720 		}
2721 		first_measure = 1;
2722 		net->prev_rtt = o_calctime;
2723 		if (sctp_logging_level & SCTP_RTTVAR_LOGGING_ENABLE) {
2724 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2725 		}
2726 	}
2727 calc_rto:
2728 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2729 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2730 	    (stcb->asoc.sat_network_lockout == 0)) {
2731 		stcb->asoc.sat_network = 1;
2732 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2733 		stcb->asoc.sat_network = 0;
2734 		stcb->asoc.sat_network_lockout = 1;
2735 	}
2736 	/* bound it, per C6/C7 in Section 5.3.1 */
2737 	if (new_rto < stcb->asoc.minrto) {
2738 		new_rto = stcb->asoc.minrto;
2739 	}
2740 	if (new_rto > stcb->asoc.maxrto) {
2741 		new_rto = stcb->asoc.maxrto;
2742 	}
2743 	/* we are now returning the RTO */
2744 	return (new_rto);
2745 }
2746 
2747 /*
2748  * return a pointer to a contiguous piece of data from the given mbuf chain
2749  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2750  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2751  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2752  */
2753 caddr_t
2754 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2755 {
2756 	uint32_t count;
2757 	uint8_t *ptr;
2758 
2759 	ptr = in_ptr;
2760 	if ((off < 0) || (len <= 0))
2761 		return (NULL);
2762 
2763 	/* find the desired start location */
2764 	while ((m != NULL) && (off > 0)) {
2765 		if (off < SCTP_BUF_LEN(m))
2766 			break;
2767 		off -= SCTP_BUF_LEN(m);
2768 		m = SCTP_BUF_NEXT(m);
2769 	}
2770 	if (m == NULL)
2771 		return (NULL);
2772 
2773 	/* is the current mbuf large enough (eg. contiguous)? */
2774 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2775 		return (mtod(m, caddr_t)+off);
2776 	} else {
2777 		/* else, it spans more than one mbuf, so save a temp copy... */
2778 		while ((m != NULL) && (len > 0)) {
2779 			count = min(SCTP_BUF_LEN(m) - off, len);
2780 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2781 			len -= count;
2782 			ptr += count;
2783 			off = 0;
2784 			m = SCTP_BUF_NEXT(m);
2785 		}
2786 		if ((m == NULL) && (len > 0))
2787 			return (NULL);
2788 		else
2789 			return ((caddr_t)in_ptr);
2790 	}
2791 }
2792 
2793 
2794 
2795 struct sctp_paramhdr *
2796 sctp_get_next_param(struct mbuf *m,
2797     int offset,
2798     struct sctp_paramhdr *pull,
2799     int pull_limit)
2800 {
2801 	/* This just provides a typed signature to Peter's Pull routine */
2802 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2803 	    (uint8_t *) pull));
2804 }
2805 
2806 
2807 int
2808 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2809 {
2810 	/*
2811 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2812 	 * padlen is > 3 this routine will fail.
2813 	 */
2814 	uint8_t *dp;
2815 	int i;
2816 
2817 	if (padlen > 3) {
2818 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2819 		return (ENOBUFS);
2820 	}
2821 	if (M_TRAILINGSPACE(m)) {
2822 		/*
2823 		 * The easy way. We hope the majority of the time we hit
2824 		 * here :)
2825 		 */
2826 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2827 		SCTP_BUF_LEN(m) += padlen;
2828 	} else {
2829 		/* Hard way we must grow the mbuf */
2830 		struct mbuf *tmp;
2831 
2832 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2833 		if (tmp == NULL) {
2834 			/* Out of space GAK! we are in big trouble. */
2835 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2836 			return (ENOSPC);
2837 		}
2838 		/* setup and insert in middle */
2839 		SCTP_BUF_NEXT(tmp) = SCTP_BUF_NEXT(m);
2840 		SCTP_BUF_LEN(tmp) = padlen;
2841 		SCTP_BUF_NEXT(m) = tmp;
2842 		dp = mtod(tmp, uint8_t *);
2843 	}
2844 	/* zero out the pad */
2845 	for (i = 0; i < padlen; i++) {
2846 		*dp = 0;
2847 		dp++;
2848 	}
2849 	return (0);
2850 }
2851 
2852 int
2853 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2854 {
2855 	/* find the last mbuf in chain and pad it */
2856 	struct mbuf *m_at;
2857 
2858 	m_at = m;
2859 	if (last_mbuf) {
2860 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2861 	} else {
2862 		while (m_at) {
2863 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2864 				return (sctp_add_pad_tombuf(m_at, padval));
2865 			}
2866 			m_at = SCTP_BUF_NEXT(m_at);
2867 		}
2868 	}
2869 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2870 	return (EFAULT);
2871 }
2872 
/*
 * Debug counter: bumped by sctp_notify_assoc_change() each time it wakes
 * the socket for a failed TCP-model/connected association.
 */
int sctp_asoc_change_wake = 0;
2874 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification ('event' with status 'error')
 * to the association's socket.  For TCP-model/connected sockets a fatal
 * event also sets so_error and wakes any sleepers.  'so_locked' tells us
 * whether the caller already holds the socket lock (only meaningful on
 * platforms that need the lock dance below).  'data' is unused here.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * First if we are going down dump everything we can to the
	 * socket rcv queue.
	 */

	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
	    ) {
		/* If the socket is gone we are out of here */
		return;
	}
	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Acquire the socket lock while dropping the TCB lock;
		 * the refcount bump keeps the stcb alive across the gap,
		 * and we re-check for a closed socket afterwards.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	/* build the notification mbuf and queue it to the read queue */
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* same lock dance as above; see comment there */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3004 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for peer address 'sa'
 * entering 'state' (with status 'error') to the association's socket
 * read queue, if the user has enabled peer-address events.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	if (sa->sa_family == AF_INET) {
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
	} else {
		/* non-INET is treated as INET6 here */
		struct sockaddr_in6 *sin6;

		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

		/*
		 * Link-local addresses are presented to the user with a
		 * usable scope_id instead of the kernel's embedded form.
		 */
		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
			if (sin6->sin6_scope_id == 0) {
				/* recover scope_id for user */
				(void)sa6_recoverscope(sin6);
			} else {
				/* clear embedded scope_id for user */
				in6_clearscope(&sin6->sin6_addr);
			}
		}
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3067 
3068 
/*
 * Queue an SCTP_SEND_FAILED notification for a chunk whose delivery
 * failed (sent or unsent, per 'error').  The user data still attached to
 * 'chk' is stolen and chained behind the notification header so the
 * application can recover it on read.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
		/* event not enabled */
		return;

	/* User-visible length: header plus the failed user data. */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	/* Only the header lives in m_notify; the data is chained below. */
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* Chain the user data behind the header mbuf. */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* Dropping here also frees the stolen user data chained above. */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	/*
	 * NOTE(review): unlike the sibling notify routines, control->length
	 * is not set here before queueing; presumably sctp_add_to_readq
	 * accounts for the mbuf chain length itself -- confirm.
	 */
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3137 
3138 
/*
 * Queue an SCTP_SEND_FAILED notification for a stream-queue pending send
 * (data never made it into a transmit chunk).  Mirrors
 * sctp_notify_send_failed() but sources its metadata from the
 * sctp_stream_queue_pending entry instead of a tmit chunk.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
		/* event not enabled */
		return;

	/* User-visible length: header plus the undelivered user data. */
	length = sizeof(struct sctp_send_failed) + sp->length;
	/* Only the header lives in m_notify; the data is chained below. */
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	ssf->ssf_info.sinfo_flags = sp->sinfo_flags;
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* Chain the user data behind the header mbuf. */
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* Dropping here also frees the stolen user data chained above. */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	/*
	 * NOTE(review): control->length is not set before queueing here
	 * either; presumably sctp_add_to_readq computes it -- confirm.
	 */
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3207 
3208 
3209 
3210 static void
3211 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3212     uint32_t error)
3213 {
3214 	struct mbuf *m_notify;
3215 	struct sctp_adaptation_event *sai;
3216 	struct sctp_queued_to_read *control;
3217 
3218 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)))
3219 		/* event not enabled */
3220 		return;
3221 
3222 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3223 	if (m_notify == NULL)
3224 		/* no space left */
3225 		return;
3226 	SCTP_BUF_LEN(m_notify) = 0;
3227 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3228 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3229 	sai->sai_flags = 0;
3230 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3231 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3232 	sai->sai_assoc_id = sctp_get_associd(stcb);
3233 
3234 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3235 	SCTP_BUF_NEXT(m_notify) = NULL;
3236 
3237 	/* append to socket */
3238 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3239 	    0, 0, 0, 0, 0, 0,
3240 	    m_notify);
3241 	if (control == NULL) {
3242 		/* no memory */
3243 		sctp_m_freem(m_notify);
3244 		return;
3245 	}
3246 	control->length = SCTP_BUF_LEN(m_notify);
3247 	control->spec_flags = M_NOTIFICATION;
3248 	/* not that we need this */
3249 	control->tail_mbuf = m_notify;
3250 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3251 	    control,
3252 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3253 }
3254 
3255 /* This always must be called with the read-queue LOCKED in the INP */
3256 void
3257 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3258     int nolock, uint32_t val)
3259 {
3260 	struct mbuf *m_notify;
3261 	struct sctp_pdapi_event *pdapi;
3262 	struct sctp_queued_to_read *control;
3263 	struct sockbuf *sb;
3264 
3265 	if ((stcb == NULL) || (stcb->sctp_socket == NULL) ||
3266 	    sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
3267 		/* event not enabled */
3268 		return;
3269 
3270 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3271 	if (m_notify == NULL)
3272 		/* no space left */
3273 		return;
3274 	SCTP_BUF_LEN(m_notify) = 0;
3275 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3276 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3277 	pdapi->pdapi_flags = 0;
3278 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3279 	pdapi->pdapi_indication = error;
3280 	pdapi->pdapi_stream = (val >> 16);
3281 	pdapi->pdapi_seq = (val & 0x0000ffff);
3282 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3283 
3284 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3285 	SCTP_BUF_NEXT(m_notify) = NULL;
3286 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3287 	    0, 0, 0, 0, 0, 0,
3288 	    m_notify);
3289 	if (control == NULL) {
3290 		/* no memory */
3291 		sctp_m_freem(m_notify);
3292 		return;
3293 	}
3294 	control->spec_flags = M_NOTIFICATION;
3295 	control->length = SCTP_BUF_LEN(m_notify);
3296 	/* not that we need this */
3297 	control->tail_mbuf = m_notify;
3298 	control->held_length = 0;
3299 	control->length = 0;
3300 	if (nolock == 0) {
3301 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
3302 	}
3303 	sb = &stcb->sctp_socket->so_rcv;
3304 	if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
3305 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3306 	}
3307 	sctp_sballoc(stcb, sb, m_notify);
3308 	if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
3309 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3310 	}
3311 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3312 	control->end_added = 1;
3313 	if (stcb->asoc.control_pdapi)
3314 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3315 	else {
3316 		/* we really should not see this case */
3317 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3318 	}
3319 	if (nolock == 0) {
3320 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
3321 	}
3322 	if (stcb->sctp_ep && stcb->sctp_socket) {
3323 		/* This should always be the case */
3324 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3325 	}
3326 }
3327 
/*
 * Handle the socket-level consequences of a completed SHUTDOWN: for
 * TCP-model (and connected one-to-many) sockets, mark the socket as
 * unable to send and wake any writer; then, if the application enabled
 * it, queue an SCTP_SHUTDOWN_EVENT notification.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Hold a ref on the assoc while the TCB lock is dropped so
		 * the socket lock can be taken without holding the TCB
		 * lock; re-take the TCB lock afterwards.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket closed while we were unlocked; bail */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3398 
3399 static void
3400 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3401     int number_entries, uint16_t * list, int flag)
3402 {
3403 	struct mbuf *m_notify;
3404 	struct sctp_queued_to_read *control;
3405 	struct sctp_stream_reset_event *strreset;
3406 	int len;
3407 
3408 	if (stcb == NULL) {
3409 		return;
3410 	}
3411 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
3412 		/* event not enabled */
3413 		return;
3414 
3415 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3416 	if (m_notify == NULL)
3417 		/* no space left */
3418 		return;
3419 	SCTP_BUF_LEN(m_notify) = 0;
3420 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3421 	if (len > M_TRAILINGSPACE(m_notify)) {
3422 		/* never enough room */
3423 		sctp_m_freem(m_notify);
3424 		return;
3425 	}
3426 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3427 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3428 	if (number_entries == 0) {
3429 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3430 	} else {
3431 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3432 	}
3433 	strreset->strreset_length = len;
3434 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3435 	if (number_entries) {
3436 		int i;
3437 
3438 		for (i = 0; i < number_entries; i++) {
3439 			strreset->strreset_list[i] = ntohs(list[i]);
3440 		}
3441 	}
3442 	SCTP_BUF_LEN(m_notify) = len;
3443 	SCTP_BUF_NEXT(m_notify) = NULL;
3444 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3445 		/* no space */
3446 		sctp_m_freem(m_notify);
3447 		return;
3448 	}
3449 	/* append to socket */
3450 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3451 	    0, 0, 0, 0, 0, 0,
3452 	    m_notify);
3453 	if (control == NULL) {
3454 		/* no memory */
3455 		sctp_m_freem(m_notify);
3456 		return;
3457 	}
3458 	control->spec_flags = M_NOTIFICATION;
3459 	control->length = SCTP_BUF_LEN(m_notify);
3460 	/* not that we need this */
3461 	control->tail_mbuf = m_notify;
3462 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3463 	    control,
3464 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3465 }
3466 
3467 
3468 void
3469 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3470     uint32_t error, void *data, int so_locked
3471 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3472     SCTP_UNUSED
3473 #endif
3474 )
3475 {
3476 	if (stcb == NULL) {
3477 		/* unlikely but */
3478 		return;
3479 	}
3480 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3481 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3482 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
3483 	    ) {
3484 		/* No notifications up when we are in a no socket state */
3485 		return;
3486 	}
3487 	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3488 		/* Can't send up to a closed socket any notifications */
3489 		return;
3490 	}
3491 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3492 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3493 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3494 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3495 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3496 			/* Don't report these in front states */
3497 			return;
3498 		}
3499 	}
3500 	switch (notification) {
3501 	case SCTP_NOTIFY_ASSOC_UP:
3502 		if (stcb->asoc.assoc_up_sent == 0) {
3503 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3504 			stcb->asoc.assoc_up_sent = 1;
3505 		}
3506 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3507 			sctp_notify_adaptation_layer(stcb, error);
3508 		}
3509 		break;
3510 	case SCTP_NOTIFY_ASSOC_DOWN:
3511 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3512 		break;
3513 	case SCTP_NOTIFY_INTERFACE_DOWN:
3514 		{
3515 			struct sctp_nets *net;
3516 
3517 			net = (struct sctp_nets *)data;
3518 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3519 			    (struct sockaddr *)&net->ro._l_addr, error);
3520 			break;
3521 		}
3522 	case SCTP_NOTIFY_INTERFACE_UP:
3523 		{
3524 			struct sctp_nets *net;
3525 
3526 			net = (struct sctp_nets *)data;
3527 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3528 			    (struct sockaddr *)&net->ro._l_addr, error);
3529 			break;
3530 		}
3531 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3532 		{
3533 			struct sctp_nets *net;
3534 
3535 			net = (struct sctp_nets *)data;
3536 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3537 			    (struct sockaddr *)&net->ro._l_addr, error);
3538 			break;
3539 		}
3540 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3541 		sctp_notify_send_failed2(stcb, error,
3542 		    (struct sctp_stream_queue_pending *)data, so_locked);
3543 		break;
3544 	case SCTP_NOTIFY_DG_FAIL:
3545 		sctp_notify_send_failed(stcb, error,
3546 		    (struct sctp_tmit_chunk *)data, so_locked);
3547 		break;
3548 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3549 		{
3550 			uint32_t val;
3551 
3552 			val = *((uint32_t *) data);
3553 
3554 			sctp_notify_partial_delivery_indication(stcb, error, 0, val);
3555 		}
3556 		break;
3557 	case SCTP_NOTIFY_STRDATA_ERR:
3558 		break;
3559 	case SCTP_NOTIFY_ASSOC_ABORTED:
3560 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3561 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3562 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3563 		} else {
3564 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3565 		}
3566 		break;
3567 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3568 		break;
3569 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3570 		break;
3571 	case SCTP_NOTIFY_ASSOC_RESTART:
3572 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3573 		break;
3574 	case SCTP_NOTIFY_HB_RESP:
3575 		break;
3576 	case SCTP_NOTIFY_STR_RESET_SEND:
3577 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3578 		break;
3579 	case SCTP_NOTIFY_STR_RESET_RECV:
3580 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3581 		break;
3582 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3583 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3584 		break;
3585 
3586 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3587 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3588 		break;
3589 
3590 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3591 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3592 		    error);
3593 		break;
3594 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3595 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3596 		    error);
3597 		break;
3598 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3599 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3600 		    error);
3601 		break;
3602 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3603 		break;
3604 	case SCTP_NOTIFY_ASCONF_FAILED:
3605 		break;
3606 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3607 		sctp_notify_shutdown_event(stcb);
3608 		break;
3609 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3610 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3611 		    (uint16_t) (uintptr_t) data);
3612 		break;
3613 #if 0
3614 	case SCTP_NOTIFY_AUTH_KEY_CONFLICT:
3615 		sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT,
3616 		    error, (uint16_t) (uintptr_t) data);
3617 		break;
3618 #endif				/* not yet? remove? */
3619 
3620 
3621 	default:
3622 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3623 		    __FUNCTION__, notification, notification);
3624 		break;
3625 	}			/* end switch */
3626 }
3627 
3628 void
3629 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3630 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3631     SCTP_UNUSED
3632 #endif
3633 )
3634 {
3635 	struct sctp_association *asoc;
3636 	struct sctp_stream_out *outs;
3637 	struct sctp_tmit_chunk *chk;
3638 	struct sctp_stream_queue_pending *sp;
3639 	int i;
3640 
3641 	asoc = &stcb->asoc;
3642 
3643 	if (stcb == NULL) {
3644 		return;
3645 	}
3646 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3647 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3648 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3649 		return;
3650 	}
3651 	/* now through all the gunk freeing chunks */
3652 	if (holds_lock == 0) {
3653 		SCTP_TCB_SEND_LOCK(stcb);
3654 	}
3655 	/* sent queue SHOULD be empty */
3656 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3657 		chk = TAILQ_FIRST(&asoc->sent_queue);
3658 		while (chk) {
3659 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3660 			asoc->sent_queue_cnt--;
3661 			if (chk->data) {
3662 				/*
3663 				 * trim off the sctp chunk header(it should
3664 				 * be there)
3665 				 */
3666 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3667 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3668 					sctp_mbuf_crush(chk->data);
3669 					chk->send_size -= sizeof(struct sctp_data_chunk);
3670 				}
3671 			}
3672 			sctp_free_bufspace(stcb, asoc, chk, 1);
3673 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3674 			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3675 			if (chk->data) {
3676 				sctp_m_freem(chk->data);
3677 				chk->data = NULL;
3678 			}
3679 			sctp_free_a_chunk(stcb, chk);
3680 			/* sa_ignore FREED_MEMORY */
3681 			chk = TAILQ_FIRST(&asoc->sent_queue);
3682 		}
3683 	}
3684 	/* pending send queue SHOULD be empty */
3685 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3686 		chk = TAILQ_FIRST(&asoc->send_queue);
3687 		while (chk) {
3688 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3689 			asoc->send_queue_cnt--;
3690 			if (chk->data) {
3691 				/*
3692 				 * trim off the sctp chunk header(it should
3693 				 * be there)
3694 				 */
3695 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3696 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3697 					sctp_mbuf_crush(chk->data);
3698 					chk->send_size -= sizeof(struct sctp_data_chunk);
3699 				}
3700 			}
3701 			sctp_free_bufspace(stcb, asoc, chk, 1);
3702 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3703 			if (chk->data) {
3704 				sctp_m_freem(chk->data);
3705 				chk->data = NULL;
3706 			}
3707 			sctp_free_a_chunk(stcb, chk);
3708 			/* sa_ignore FREED_MEMORY */
3709 			chk = TAILQ_FIRST(&asoc->send_queue);
3710 		}
3711 	}
3712 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3713 		/* For each stream */
3714 		outs = &stcb->asoc.strmout[i];
3715 		/* clean up any sends there */
3716 		stcb->asoc.locked_on_sending = NULL;
3717 		sp = TAILQ_FIRST(&outs->outqueue);
3718 		while (sp) {
3719 			stcb->asoc.stream_queue_cnt--;
3720 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3721 			sctp_free_spbufspace(stcb, asoc, sp);
3722 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3723 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3724 			if (sp->data) {
3725 				sctp_m_freem(sp->data);
3726 				sp->data = NULL;
3727 			}
3728 			if (sp->net)
3729 				sctp_free_remote_addr(sp->net);
3730 			sp->net = NULL;
3731 			/* Free the chunk */
3732 			sctp_free_a_strmoq(stcb, sp);
3733 			/* sa_ignore FREED_MEMORY */
3734 			sp = TAILQ_FIRST(&outs->outqueue);
3735 		}
3736 	}
3737 
3738 	if (holds_lock == 0) {
3739 		SCTP_TCB_SEND_UNLOCK(stcb);
3740 	}
3741 }
3742 
3743 void
3744 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3745 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3746     SCTP_UNUSED
3747 #endif
3748 )
3749 {
3750 
3751 	if (stcb == NULL) {
3752 		return;
3753 	}
3754 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3755 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3756 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3757 		return;
3758 	}
3759 	/* Tell them we lost the asoc */
3760 	sctp_report_all_outbound(stcb, 1, so_locked);
3761 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3762 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3763 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3764 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3765 	}
3766 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3767 }
3768 
/*
 * Abort an association in response to an inbound packet: notify the ULP
 * (when a TCB exists), send an ABORT to the peer, then free the assoc.
 * With no TCB, the ABORT is sent with vtag 0 and a socket-gone inp may
 * be freed instead.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* ABORT goes out with the peer's vtag (or 0 if we have no TCB) */
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Take the socket lock without holding the TCB lock: hold
		 * an assoc ref, drop TCB, lock socket, re-lock TCB.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		/* no TCB: maybe finish tearing down a dying endpoint */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3814 
3815 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the per-assoc inbound and outbound TSN tracking
 * logs (circular buffers of SCTP_TSN_LOG_SIZE entries).  The entire body
 * is compiled out unless NOSIY_PRINTS is defined.
 * NOTE(review): "NOSIY_PRINTS" looks like a typo for NOISY_PRINTS; left
 * untouched since changing the macro name would change what compiles.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* wrapped: entries from the current index to the end are oldest */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	/* then the entries from the start up to the current index */
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3876 
3877 #endif
3878 
/*
 * Locally abort an existing association: notify the ULP (unless the
 * socket is gone), send an ABORT chunk to the peer with 'op_err' as the
 * cause, update stats, and free the association.  With stcb == NULL,
 * only a dying endpoint with no remaining assocs is cleaned up.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* NOTE(review): vtag is computed but not used below -- confirm */
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		/* aborting an established assoc: drop the gauge */
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Take the socket lock without holding the TCB lock: hold an
	 * assoc ref, drop TCB, lock socket, re-lock TCB.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3944 
/*
 * Handle an "out of the blue" packet: one that matched no existing
 * association.  Walk its chunk list; several chunk types suppress any
 * reply, a SHUTDOWN-ACK is answered with a SHUTDOWN-COMPLETE, and
 * anything else falls through to sending an ABORT back to the sender.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			/* dying endpoint with no associations left: free it */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	/* pull each chunk header (copied into chunk_buf if split) */
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			/* peer is finishing a shutdown; complete it */
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
			return;
		default:
			break;
		}
		/* chunks are padded to 4-byte boundaries */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/* nothing suppressed a reply: abort the OOTB packet */
	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id);
}
3996 
3997 /*
3998  * check the inbound datagram to make sure there is not an abort inside it,
3999  * if there is return 1, else return 0.
4000  */
4001 int
4002 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4003 {
4004 	struct sctp_chunkhdr *ch;
4005 	struct sctp_init_chunk *init_chk, chunk_buf;
4006 	int offset;
4007 	unsigned int chk_length;
4008 
4009 	offset = iphlen + sizeof(struct sctphdr);
4010 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4011 	    (uint8_t *) & chunk_buf);
4012 	while (ch != NULL) {
4013 		chk_length = ntohs(ch->chunk_length);
4014 		if (chk_length < sizeof(*ch)) {
4015 			/* packet is probably corrupt */
4016 			break;
4017 		}
4018 		/* we seem to be ok, is it an abort? */
4019 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4020 			/* yep, tell them */
4021 			return (1);
4022 		}
4023 		if (ch->chunk_type == SCTP_INITIATION) {
4024 			/* need to update the Vtag */
4025 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4026 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4027 			if (init_chk != NULL) {
4028 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4029 			}
4030 		}
4031 		/* Nope, move to the next chunk */
4032 		offset += SCTP_SIZE32(chk_length);
4033 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4034 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4035 	}
4036 	return (0);
4037 }
4038 
4039 /*
4040  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4041  * set (i.e. it's 0) so, create this function to compare link local scopes
4042  */
4043 uint32_t
4044 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4045 {
4046 	struct sockaddr_in6 a, b;
4047 
4048 	/* save copies */
4049 	a = *addr1;
4050 	b = *addr2;
4051 
4052 	if (a.sin6_scope_id == 0)
4053 		if (sa6_recoverscope(&a)) {
4054 			/* can't get scope, so can't match */
4055 			return (0);
4056 		}
4057 	if (b.sin6_scope_id == 0)
4058 		if (sa6_recoverscope(&b)) {
4059 			/* can't get scope, so can't match */
4060 			return (0);
4061 		}
4062 	if (a.sin6_scope_id != b.sin6_scope_id)
4063 		return (0);
4064 
4065 	return (1);
4066 }
4067 
4068 /*
4069  * returns a sockaddr_in6 with embedded scope recovered and removed
4070  */
4071 struct sockaddr_in6 *
4072 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4073 {
4074 	/* check and strip embedded scope junk */
4075 	if (addr->sin6_family == AF_INET6) {
4076 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4077 			if (addr->sin6_scope_id == 0) {
4078 				*store = *addr;
4079 				if (!sa6_recoverscope(store)) {
4080 					/* use the recovered scope */
4081 					addr = store;
4082 				}
4083 			} else {
4084 				/* else, return the original "to" addr */
4085 				in6_clearscope(&addr->sin6_addr);
4086 			}
4087 		}
4088 	}
4089 	return (addr);
4090 }
4091 
4092 /*
4093  * are the two addresses the same?  currently a "scopeless" check returns: 1
4094  * if same, 0 if not
4095  */
4096 int
4097 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4098 {
4099 
4100 	/* must be valid */
4101 	if (sa1 == NULL || sa2 == NULL)
4102 		return (0);
4103 
4104 	/* must be the same family */
4105 	if (sa1->sa_family != sa2->sa_family)
4106 		return (0);
4107 
4108 	if (sa1->sa_family == AF_INET6) {
4109 		/* IPv6 addresses */
4110 		struct sockaddr_in6 *sin6_1, *sin6_2;
4111 
4112 		sin6_1 = (struct sockaddr_in6 *)sa1;
4113 		sin6_2 = (struct sockaddr_in6 *)sa2;
4114 		return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
4115 		    &sin6_2->sin6_addr));
4116 	} else if (sa1->sa_family == AF_INET) {
4117 		/* IPv4 addresses */
4118 		struct sockaddr_in *sin_1, *sin_2;
4119 
4120 		sin_1 = (struct sockaddr_in *)sa1;
4121 		sin_2 = (struct sockaddr_in *)sa2;
4122 		return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4123 	} else {
4124 		/* we don't do these... */
4125 		return (0);
4126 	}
4127 }
4128 
4129 void
4130 sctp_print_address(struct sockaddr *sa)
4131 {
4132 	char ip6buf[INET6_ADDRSTRLEN];
4133 
4134 	ip6buf[0] = 0;
4135 	if (sa->sa_family == AF_INET6) {
4136 		struct sockaddr_in6 *sin6;
4137 
4138 		sin6 = (struct sockaddr_in6 *)sa;
4139 		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4140 		    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4141 		    ntohs(sin6->sin6_port),
4142 		    sin6->sin6_scope_id);
4143 	} else if (sa->sa_family == AF_INET) {
4144 		struct sockaddr_in *sin;
4145 		unsigned char *p;
4146 
4147 		sin = (struct sockaddr_in *)sa;
4148 		p = (unsigned char *)&sin->sin_addr;
4149 		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4150 		    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4151 	} else {
4152 		SCTP_PRINTF("?\n");
4153 	}
4154 }
4155 
4156 void
4157 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4158 {
4159 	if (iph->ip_v == IPVERSION) {
4160 		struct sockaddr_in lsa, fsa;
4161 
4162 		bzero(&lsa, sizeof(lsa));
4163 		lsa.sin_len = sizeof(lsa);
4164 		lsa.sin_family = AF_INET;
4165 		lsa.sin_addr = iph->ip_src;
4166 		lsa.sin_port = sh->src_port;
4167 		bzero(&fsa, sizeof(fsa));
4168 		fsa.sin_len = sizeof(fsa);
4169 		fsa.sin_family = AF_INET;
4170 		fsa.sin_addr = iph->ip_dst;
4171 		fsa.sin_port = sh->dest_port;
4172 		SCTP_PRINTF("src: ");
4173 		sctp_print_address((struct sockaddr *)&lsa);
4174 		SCTP_PRINTF("dest: ");
4175 		sctp_print_address((struct sockaddr *)&fsa);
4176 	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
4177 		struct ip6_hdr *ip6;
4178 		struct sockaddr_in6 lsa6, fsa6;
4179 
4180 		ip6 = (struct ip6_hdr *)iph;
4181 		bzero(&lsa6, sizeof(lsa6));
4182 		lsa6.sin6_len = sizeof(lsa6);
4183 		lsa6.sin6_family = AF_INET6;
4184 		lsa6.sin6_addr = ip6->ip6_src;
4185 		lsa6.sin6_port = sh->src_port;
4186 		bzero(&fsa6, sizeof(fsa6));
4187 		fsa6.sin6_len = sizeof(fsa6);
4188 		fsa6.sin6_family = AF_INET6;
4189 		fsa6.sin6_addr = ip6->ip6_dst;
4190 		fsa6.sin6_port = sh->dest_port;
4191 		SCTP_PRINTF("src: ");
4192 		sctp_print_address((struct sockaddr *)&lsa6);
4193 		SCTP_PRINTF("dest: ");
4194 		sctp_print_address((struct sockaddr *)&fsa6);
4195 	}
4196 }
4197 
/*
 * Move all read-queue control blocks that belong to stcb from old_inp to
 * new_inp (used by peeloff/accept).  Each moved mbuf chain is uncharged
 * from the old socket's receive buffer and recharged to the new one.
 * Lock order: sblock on the old socket first, then the INP read locks
 * (old and new are never held at the same time).
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;	/* staging list between the two inps */
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* keep concurrent readers off the old socket while we move data */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for out target stcb */
	while (control) {
		/* grab the successor first: we may unlink control below */
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* uncharge the whole chain from the old rcv buffer */
			while (m) {
				if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge the chain to the new socket's rcv buffer */
		while (m) {
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4279 
4280 
4281 void
4282 sctp_add_to_readq(struct sctp_inpcb *inp,
4283     struct sctp_tcb *stcb,
4284     struct sctp_queued_to_read *control,
4285     struct sockbuf *sb,
4286     int end,
4287     int so_locked
4288 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4289     SCTP_UNUSED
4290 #endif
4291 )
4292 {
4293 	/*
4294 	 * Here we must place the control on the end of the socket read
4295 	 * queue AND increment sb_cc so that select will work properly on
4296 	 * read.
4297 	 */
4298 	struct mbuf *m, *prev = NULL;
4299 
4300 	if (inp == NULL) {
4301 		/* Gak, TSNH!! */
4302 #ifdef INVARIANTS
4303 		panic("Gak, inp NULL on add_to_readq");
4304 #endif
4305 		return;
4306 	}
4307 	SCTP_INP_READ_LOCK(inp);
4308 	if (!(control->spec_flags & M_NOTIFICATION)) {
4309 		atomic_add_int(&inp->total_recvs, 1);
4310 		if (!control->do_not_ref_stcb) {
4311 			atomic_add_int(&stcb->total_recvs, 1);
4312 		}
4313 	}
4314 	m = control->data;
4315 	control->held_length = 0;
4316 	control->length = 0;
4317 	while (m) {
4318 		if (SCTP_BUF_LEN(m) == 0) {
4319 			/* Skip mbufs with NO length */
4320 			if (prev == NULL) {
4321 				/* First one */
4322 				control->data = sctp_m_free(m);
4323 				m = control->data;
4324 			} else {
4325 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4326 				m = SCTP_BUF_NEXT(prev);
4327 			}
4328 			if (m == NULL) {
4329 				control->tail_mbuf = prev;;
4330 			}
4331 			continue;
4332 		}
4333 		prev = m;
4334 		if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
4335 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4336 		}
4337 		sctp_sballoc(stcb, sb, m);
4338 		if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
4339 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4340 		}
4341 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4342 		m = SCTP_BUF_NEXT(m);
4343 	}
4344 	if (prev != NULL) {
4345 		control->tail_mbuf = prev;
4346 	} else {
4347 		/* Everything got collapsed out?? */
4348 		return;
4349 	}
4350 	if (end) {
4351 		control->end_added = 1;
4352 	}
4353 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4354 	SCTP_INP_READ_UNLOCK(inp);
4355 	if (inp && inp->sctp_socket) {
4356 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4357 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4358 		} else {
4359 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4360 			struct socket *so;
4361 
4362 			so = SCTP_INP_SO(inp);
4363 			if (!so_locked) {
4364 				atomic_add_int(&stcb->asoc.refcnt, 1);
4365 				SCTP_TCB_UNLOCK(stcb);
4366 				SCTP_SOCKET_LOCK(so, 1);
4367 				SCTP_TCB_LOCK(stcb);
4368 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4369 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4370 					SCTP_SOCKET_UNLOCK(so, 1);
4371 					return;
4372 				}
4373 			}
4374 #endif
4375 			sctp_sorwakeup(inp, inp->sctp_socket);
4376 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4377 			if (!so_locked) {
4378 				SCTP_SOCKET_UNLOCK(so, 1);
4379 			}
4380 #endif
4381 		}
4382 	}
4383 }
4384 
4385 
/*
 * Append mbuf chain m to an existing read-queue control block (used for
 * partial-delivery API and reassembly-queue appends).  Zero-length mbufs
 * are stripped, the remaining bytes are charged to sb (when non-NULL)
 * and added to control->length.  Returns 0 on success, -1 if control is
 * NULL, already complete, or m is empty.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common bail-out: release the read lock if we took it */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			/* PDAPI case: charge each mbuf to the sockbuf */
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		/* prev is the last non-empty mbuf of the appended chain */
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * NOTE(review): this path dereferences stcb without
			 * a NULL check, yet stcb == NULL is tolerated above
			 * — confirm callers on this build never pass NULL.
			 */
			struct socket *so;

			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4527 
4528 
4529 
4530 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4531  *************ALTERNATE ROUTING CODE
4532  */
4533 
4534 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4535  *************ALTERNATE ROUTING CODE
4536  */
4537 
4538 struct mbuf *
4539 sctp_generate_invmanparam(int err)
4540 {
4541 	/* Return a MBUF with a invalid mandatory parameter */
4542 	struct mbuf *m;
4543 
4544 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4545 	if (m) {
4546 		struct sctp_paramhdr *ph;
4547 
4548 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4549 		ph = mtod(m, struct sctp_paramhdr *);
4550 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4551 		ph->param_type = htons(err);
4552 	}
4553 	return (m);
4554 }
4555 
#ifdef SCTP_MBCNT_LOGGING
/*
 * Release the buffer-space accounting held by chunk tp1: decrement the
 * association's outbound queue counters (clamping at zero) and, for
 * TCP-model sockets, the socket send-buffer byte count as well.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		/* nothing was ever charged for this chunk */
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (sctp_logging_level & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero rather than letting the counter wrap */
	if (asoc->total_output_queue_size < tp1->book_size) {
		asoc->total_output_queue_size = 0;
	} else {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	}

	if (stcb->sctp_socket &&
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))) {
		/* TCP model: mirror the decrement in the send buffer */
		if (stcb->sctp_socket->so_snd.sb_cc < tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc = 0;
		} else {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		}
	}
}

#endif
4590 
/*
 * Release a PR-SCTP chunk (and any continuation fragments of the same
 * message) from the given queue: mark each fragment FORWARD_TSN_SKIP,
 * free its data, notify the ULP of the failure, and move send-queue
 * entries onto the sent queue.  If the message's fragments continue from
 * the sent queue into the send queue, recurse once to cover them.
 * Returns the total book_size of everything released.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    int reason, struct sctpchunk_listhead *queue, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	int ret_sz = 0;
	int notdone;
	uint8_t foundeom = 0;	/* saw the end of the fragmented message? */

	do {
		ret_sz += tp1->book_size;
		/* mark so a FORWARD-TSN will skip this chunk */
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if (tp1->data) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, SCTP_SO_NOT_LOCKED);
			sctp_m_freem(tp1->data);
			tp1->data = NULL;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			so = SCTP_INP_SO(stcb->sctp_ep);
			if (!so_locked) {
				/* ref the tcb across the lock juggling */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					/*
					 * assoc was freed while we were
					 * unlocked
					 */
					SCTP_SOCKET_UNLOCK(so, 1);
					return (ret_sz);
				}
			}
#endif
			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
		if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
			/* one fewer buffer-policy chunk can be removed */
			stcb->asoc.sent_queue_cnt_removeable--;
		}
		if (queue == &stcb->asoc.send_queue) {
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.sent_queue_cnt++;
		}
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done   */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
		/*
		 * recurse throught the send_queue too, starting at the
		 * beginning.
		 */
		if (tp1) {
			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
			    &stcb->asoc.send_queue, so_locked);
		} else {
			SCTP_PRINTF("hmm, nothing on the send queue and no EOM?\n");
		}
	}
	return (ret_sz);
}
4687 
4688 /*
4689  * checks to see if the given address, sa, is one that is currently known by
4690  * the kernel note: can't distinguish the same address on multiple interfaces
4691  * and doesn't handle multiple addresses with different zone/scope id's note:
4692  * ifa_ifwithaddr() compares the entire sockaddr struct
4693  */
4694 struct sctp_ifa *
4695 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4696     int holds_lock)
4697 {
4698 	struct sctp_laddr *laddr;
4699 
4700 	if (holds_lock == 0) {
4701 		SCTP_INP_RLOCK(inp);
4702 	}
4703 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4704 		if (laddr->ifa == NULL)
4705 			continue;
4706 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4707 			continue;
4708 		if (addr->sa_family == AF_INET) {
4709 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4710 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4711 				/* found him. */
4712 				if (holds_lock == 0) {
4713 					SCTP_INP_RUNLOCK(inp);
4714 				}
4715 				return (laddr->ifa);
4716 				break;
4717 			}
4718 		} else if (addr->sa_family == AF_INET6) {
4719 			if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr,
4720 			    &laddr->ifa->address.sin6.sin6_addr)) {
4721 				/* found him. */
4722 				if (holds_lock == 0) {
4723 					SCTP_INP_RUNLOCK(inp);
4724 				}
4725 				return (laddr->ifa);
4726 				break;
4727 			}
4728 		}
4729 	}
4730 	if (holds_lock == 0) {
4731 		SCTP_INP_RUNLOCK(inp);
4732 	}
4733 	return (NULL);
4734 }
4735 
4736 uint32_t
4737 sctp_get_ifa_hash_val(struct sockaddr *addr)
4738 {
4739 	if (addr->sa_family == AF_INET) {
4740 		struct sockaddr_in *sin;
4741 
4742 		sin = (struct sockaddr_in *)addr;
4743 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4744 	} else if (addr->sa_family == AF_INET6) {
4745 		struct sockaddr_in6 *sin6;
4746 		uint32_t hash_of_addr;
4747 
4748 		sin6 = (struct sockaddr_in6 *)addr;
4749 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4750 		    sin6->sin6_addr.s6_addr32[1] +
4751 		    sin6->sin6_addr.s6_addr32[2] +
4752 		    sin6->sin6_addr.s6_addr32[3]);
4753 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4754 		return (hash_of_addr);
4755 	}
4756 	return (0);
4757 }
4758 
4759 struct sctp_ifa *
4760 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4761 {
4762 	struct sctp_ifa *sctp_ifap;
4763 	struct sctp_vrf *vrf;
4764 	struct sctp_ifalist *hash_head;
4765 	uint32_t hash_of_addr;
4766 
4767 	if (holds_lock == 0)
4768 		SCTP_IPI_ADDR_RLOCK();
4769 
4770 	vrf = sctp_find_vrf(vrf_id);
4771 	if (vrf == NULL) {
4772 		if (holds_lock == 0)
4773 			SCTP_IPI_ADDR_RUNLOCK();
4774 		return (NULL);
4775 	}
4776 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4777 
4778 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4779 	if (hash_head == NULL) {
4780 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4781 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4782 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4783 		sctp_print_address(addr);
4784 		SCTP_PRINTF("No such bucket for address\n");
4785 		if (holds_lock == 0)
4786 			SCTP_IPI_ADDR_RUNLOCK();
4787 
4788 		return (NULL);
4789 	}
4790 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4791 		if (sctp_ifap == NULL) {
4792 			panic("Huh LIST_FOREACH corrupt");
4793 		}
4794 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4795 			continue;
4796 		if (addr->sa_family == AF_INET) {
4797 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4798 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4799 				/* found him. */
4800 				if (holds_lock == 0)
4801 					SCTP_IPI_ADDR_RUNLOCK();
4802 				return (sctp_ifap);
4803 				break;
4804 			}
4805 		} else if (addr->sa_family == AF_INET6) {
4806 			if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr,
4807 			    &sctp_ifap->address.sin6.sin6_addr)) {
4808 				/* found him. */
4809 				if (holds_lock == 0)
4810 					SCTP_IPI_ADDR_RUNLOCK();
4811 				return (sctp_ifap);
4812 				break;
4813 			}
4814 		}
4815 	}
4816 	if (holds_lock == 0)
4817 		SCTP_IPI_ADDR_RUNLOCK();
4818 	return (NULL);
4819 }
4820 
/*
 * Called after the user has read data: decide whether the receive window
 * has opened enough (by at least rwnd_req) to be worth sending a window
 * update SACK immediately.  Takes a temporary refcount on the tcb and,
 * if hold_rlock is set, drops/reacquires the INP read lock around the
 * SACK send.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;	/* did we drop the caller's read lock? */
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* pin the tcb while we work */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		/* window shrank; no growth to report */
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window grew enough: send an update SACK now */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb);
		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* restore the caller's read lock if we dropped it */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
4901 
4902 int
4903 sctp_sorecvmsg(struct socket *so,
4904     struct uio *uio,
4905     struct mbuf **mp,
4906     struct sockaddr *from,
4907     int fromlen,
4908     int *msg_flags,
4909     struct sctp_sndrcvinfo *sinfo,
4910     int filling_sinfo)
4911 {
4912 	/*
4913 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
4914 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
4915 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
4916 	 * On the way out we may send out any combination of:
4917 	 * MSG_NOTIFICATION MSG_EOR
4918 	 *
4919 	 */
4920 	struct sctp_inpcb *inp = NULL;
4921 	int my_len = 0;
4922 	int cp_len = 0, error = 0;
4923 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
4924 	struct mbuf *m = NULL, *embuf = NULL;
4925 	struct sctp_tcb *stcb = NULL;
4926 	int wakeup_read_socket = 0;
4927 	int freecnt_applied = 0;
4928 	int out_flags = 0, in_flags = 0;
4929 	int block_allowed = 1;
4930 	uint32_t freed_so_far = 0;
4931 	int copied_so_far = 0;
4932 	int in_eeor_mode = 0;
4933 	int no_rcv_needed = 0;
4934 	uint32_t rwnd_req = 0;
4935 	int hold_sblock = 0;
4936 	int hold_rlock = 0;
4937 	int slen = 0;
4938 	uint32_t held_length = 0;
4939 	int sockbuf_lock = 0;
4940 
4941 	if (uio == NULL) {
4942 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
4943 		return (EINVAL);
4944 	}
4945 	if (from && fromlen <= 0) {
4946 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
4947 		return (EINVAL);
4948 	}
4949 	if (msg_flags) {
4950 		in_flags = *msg_flags;
4951 		if (in_flags & MSG_PEEK)
4952 			SCTP_STAT_INCR(sctps_read_peeks);
4953 	} else {
4954 		in_flags = 0;
4955 	}
4956 	slen = uio->uio_resid;
4957 
4958 	/* Pull in and set up our int flags */
4959 	if (in_flags & MSG_OOB) {
4960 		/* Out of band's NOT supported */
4961 		return (EOPNOTSUPP);
4962 	}
4963 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
4964 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
4965 		return (EINVAL);
4966 	}
4967 	if ((in_flags & (MSG_DONTWAIT
4968 	    | MSG_NBIO
4969 	    )) ||
4970 	    SCTP_SO_IS_NBIO(so)) {
4971 		block_allowed = 0;
4972 	}
4973 	/* setup the endpoint */
4974 	inp = (struct sctp_inpcb *)so->so_pcb;
4975 	if (inp == NULL) {
4976 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
4977 		return (EFAULT);
4978 	}
4979 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
4980 	/* Must be at least a MTU's worth */
4981 	if (rwnd_req < SCTP_MIN_RWND)
4982 		rwnd_req = SCTP_MIN_RWND;
4983 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
4984 	if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
4985 		sctp_misc_ints(SCTP_SORECV_ENTER,
4986 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
4987 	}
4988 	if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
4989 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
4990 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
4991 	}
4992 	error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
4993 	sockbuf_lock = 1;
4994 	if (error) {
4995 		goto release_unlocked;
4996 	}
4997 restart:
4998 
4999 
5000 restart_nosblocks:
5001 	if (hold_sblock == 0) {
5002 		SOCKBUF_LOCK(&so->so_rcv);
5003 		hold_sblock = 1;
5004 	}
5005 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5006 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5007 		goto out;
5008 	}
5009 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5010 		if (so->so_error) {
5011 			error = so->so_error;
5012 			if ((in_flags & MSG_PEEK) == 0)
5013 				so->so_error = 0;
5014 		} else {
5015 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5016 			/* indicate EOF */
5017 			error = 0;
5018 		}
5019 		goto out;
5020 	}
5021 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5022 		/* we need to wait for data */
5023 		if ((so->so_rcv.sb_cc == 0) &&
5024 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5025 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5026 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5027 				/*
5028 				 * For active open side clear flags for
5029 				 * re-use passive open is blocked by
5030 				 * connect.
5031 				 */
5032 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5033 					/*
5034 					 * You were aborted, passive side
5035 					 * always hits here
5036 					 */
5037 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5038 					error = ECONNRESET;
5039 					/*
5040 					 * You get this once if you are
5041 					 * active open side
5042 					 */
5043 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5044 						/*
5045 						 * Remove flag if on the
5046 						 * active open side
5047 						 */
5048 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5049 					}
5050 				}
5051 				so->so_state &= ~(SS_ISCONNECTING |
5052 				    SS_ISDISCONNECTING |
5053 				    SS_ISCONFIRMING |
5054 				    SS_ISCONNECTED);
5055 				if (error == 0) {
5056 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5057 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5058 						error = ENOTCONN;
5059 					} else {
5060 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5061 					}
5062 				}
5063 				goto out;
5064 			}
5065 		}
5066 		error = sbwait(&so->so_rcv);
5067 		if (error) {
5068 			goto out;
5069 		}
5070 		held_length = 0;
5071 		goto restart_nosblocks;
5072 	} else if (so->so_rcv.sb_cc == 0) {
5073 		if (so->so_error) {
5074 			error = so->so_error;
5075 			if ((in_flags & MSG_PEEK) == 0)
5076 				so->so_error = 0;
5077 		} else {
5078 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5079 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5080 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5081 					/*
5082 					 * For active open side clear flags
5083 					 * for re-use passive open is
5084 					 * blocked by connect.
5085 					 */
5086 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5087 						/*
5088 						 * You were aborted, passive
5089 						 * side always hits here
5090 						 */
5091 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5092 						error = ECONNRESET;
5093 						/*
5094 						 * You get this once if you
5095 						 * are active open side
5096 						 */
5097 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5098 							/*
5099 							 * Remove flag if on
5100 							 * the active open
5101 							 * side
5102 							 */
5103 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5104 						}
5105 					}
5106 					so->so_state &= ~(SS_ISCONNECTING |
5107 					    SS_ISDISCONNECTING |
5108 					    SS_ISCONFIRMING |
5109 					    SS_ISCONNECTED);
5110 					if (error == 0) {
5111 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5112 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5113 							error = ENOTCONN;
5114 						} else {
5115 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5116 						}
5117 					}
5118 					goto out;
5119 				}
5120 			}
5121 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5122 			error = EWOULDBLOCK;
5123 		}
5124 		goto out;
5125 	}
5126 	if (hold_sblock == 1) {
5127 		SOCKBUF_UNLOCK(&so->so_rcv);
5128 		hold_sblock = 0;
5129 	}
5130 	/* we possibly have data we can read */
5131 	/* sa_ignore FREED_MEMORY */
5132 	control = TAILQ_FIRST(&inp->read_queue);
5133 	if (control == NULL) {
5134 		/*
5135 		 * This could be happening since the appender did the
5136 		 * increment but as not yet did the tailq insert onto the
5137 		 * read_queue
5138 		 */
5139 		if (hold_rlock == 0) {
5140 			SCTP_INP_READ_LOCK(inp);
5141 			hold_rlock = 1;
5142 		}
5143 		control = TAILQ_FIRST(&inp->read_queue);
5144 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5145 #ifdef INVARIANTS
5146 			panic("Huh, its non zero and nothing on control?");
5147 #endif
5148 			so->so_rcv.sb_cc = 0;
5149 		}
5150 		SCTP_INP_READ_UNLOCK(inp);
5151 		hold_rlock = 0;
5152 		goto restart;
5153 	}
5154 	if ((control->length == 0) &&
5155 	    (control->do_not_ref_stcb)) {
5156 		/*
5157 		 * Clean up code for freeing assoc that left behind a
5158 		 * pdapi.. maybe a peer in EEOR that just closed after
5159 		 * sending and never indicated a EOR.
5160 		 */
5161 		if (hold_rlock == 0) {
5162 			hold_rlock = 1;
5163 			SCTP_INP_READ_LOCK(inp);
5164 		}
5165 		control->held_length = 0;
5166 		if (control->data) {
5167 			/* Hmm there is data here .. fix */
5168 			struct mbuf *m_tmp;
5169 			int cnt = 0;
5170 
5171 			m_tmp = control->data;
5172 			while (m_tmp) {
5173 				cnt += SCTP_BUF_LEN(m_tmp);
5174 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5175 					control->tail_mbuf = m_tmp;
5176 					control->end_added = 1;
5177 				}
5178 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5179 			}
5180 			control->length = cnt;
5181 		} else {
5182 			/* remove it */
5183 			TAILQ_REMOVE(&inp->read_queue, control, next);
5184 			/* Add back any hiddend data */
5185 			sctp_free_remote_addr(control->whoFrom);
5186 			sctp_free_a_readq(stcb, control);
5187 		}
5188 		if (hold_rlock) {
5189 			hold_rlock = 0;
5190 			SCTP_INP_READ_UNLOCK(inp);
5191 		}
5192 		goto restart;
5193 	}
5194 	if (control->length == 0) {
5195 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5196 		    (filling_sinfo)) {
5197 			/* find a more suitable one then this */
5198 			ctl = TAILQ_NEXT(control, next);
5199 			while (ctl) {
5200 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5201 				    (ctl->some_taken ||
5202 				    (ctl->spec_flags & M_NOTIFICATION) ||
5203 				    ((ctl->do_not_ref_stcb == 0) &&
5204 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5205 				    ) {
5206 					/*-
5207 					 * If we have a different TCB next, and there is data
5208 					 * present. If we have already taken some (pdapi), OR we can
5209 					 * ref the tcb and no delivery as started on this stream, we
5210 					 * take it. Note we allow a notification on a different
5211 					 * assoc to be delivered..
5212 					 */
5213 					control = ctl;
5214 					goto found_one;
5215 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5216 					    (ctl->length) &&
5217 					    ((ctl->some_taken) ||
5218 					    ((ctl->do_not_ref_stcb == 0) &&
5219 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5220 					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5221 				    ) {
5222 					/*-
5223 					 * If we have the same tcb, and there is data present, and we
5224 					 * have the strm interleave feature present. Then if we have
5225 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5226 					 * not started a delivery for this stream, we can take it.
5227 					 * Note we do NOT allow a notificaiton on the same assoc to
5228 					 * be delivered.
5229 					 */
5230 					control = ctl;
5231 					goto found_one;
5232 				}
5233 				ctl = TAILQ_NEXT(ctl, next);
5234 			}
5235 		}
5236 		/*
5237 		 * if we reach here, not suitable replacement is available
5238 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5239 		 * into the our held count, and its time to sleep again.
5240 		 */
5241 		held_length = so->so_rcv.sb_cc;
5242 		control->held_length = so->so_rcv.sb_cc;
5243 		goto restart;
5244 	}
5245 	/* Clear the held length since there is something to read */
5246 	control->held_length = 0;
5247 	if (hold_rlock) {
5248 		SCTP_INP_READ_UNLOCK(inp);
5249 		hold_rlock = 0;
5250 	}
5251 found_one:
5252 	/*
5253 	 * If we reach here, control has a some data for us to read off.
5254 	 * Note that stcb COULD be NULL.
5255 	 */
5256 	control->some_taken = 1;
5257 	if (hold_sblock) {
5258 		SOCKBUF_UNLOCK(&so->so_rcv);
5259 		hold_sblock = 0;
5260 	}
5261 	stcb = control->stcb;
5262 	if (stcb) {
5263 		if ((control->do_not_ref_stcb == 0) &&
5264 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5265 			if (freecnt_applied == 0)
5266 				stcb = NULL;
5267 		} else if (control->do_not_ref_stcb == 0) {
5268 			/* you can't free it on me please */
5269 			/*
5270 			 * The lock on the socket buffer protects us so the
5271 			 * free code will stop. But since we used the
5272 			 * socketbuf lock and the sender uses the tcb_lock
5273 			 * to increment, we need to use the atomic add to
5274 			 * the refcnt
5275 			 */
5276 			if (freecnt_applied) {
5277 #ifdef INVARIANTS
5278 				panic("refcnt already incremented");
5279 #else
5280 				printf("refcnt already incremented?\n");
5281 #endif
5282 			} else {
5283 				atomic_add_int(&stcb->asoc.refcnt, 1);
5284 				freecnt_applied = 1;
5285 			}
5286 			/*
5287 			 * Setup to remember how much we have not yet told
5288 			 * the peer our rwnd has opened up. Note we grab the
5289 			 * value from the tcb from last time. Note too that
5290 			 * sack sending clears this when a sack is sent,
5291 			 * which is fine. Once we hit the rwnd_req, we then
5292 			 * will go to the sctp_user_rcvd() that will not
5293 			 * lock until it KNOWs it MUST send a WUP-SACK.
5294 			 */
5295 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5296 			stcb->freed_by_sorcv_sincelast = 0;
5297 		}
5298 	}
5299 	if (stcb &&
5300 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5301 	    control->do_not_ref_stcb == 0) {
5302 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5303 	}
5304 	/* First lets get off the sinfo and sockaddr info */
5305 	if ((sinfo) && filling_sinfo) {
5306 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5307 		nxt = TAILQ_NEXT(control, next);
5308 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5309 			struct sctp_extrcvinfo *s_extra;
5310 
5311 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5312 			if ((nxt) &&
5313 			    (nxt->length)) {
5314 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5315 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5316 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5317 				}
5318 				if (nxt->spec_flags & M_NOTIFICATION) {
5319 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5320 				}
5321 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5322 				s_extra->sreinfo_next_length = nxt->length;
5323 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5324 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5325 				if (nxt->tail_mbuf != NULL) {
5326 					if (nxt->end_added) {
5327 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5328 					}
5329 				}
5330 			} else {
5331 				/*
5332 				 * we explicitly 0 this, since the memcpy
5333 				 * got some other things beyond the older
5334 				 * sinfo_ that is on the control's structure
5335 				 * :-D
5336 				 */
5337 				nxt = NULL;
5338 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5339 				s_extra->sreinfo_next_aid = 0;
5340 				s_extra->sreinfo_next_length = 0;
5341 				s_extra->sreinfo_next_ppid = 0;
5342 				s_extra->sreinfo_next_stream = 0;
5343 			}
5344 		}
5345 		/*
5346 		 * update off the real current cum-ack, if we have an stcb.
5347 		 */
5348 		if ((control->do_not_ref_stcb == 0) && stcb)
5349 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5350 		/*
5351 		 * mask off the high bits, we keep the actual chunk bits in
5352 		 * there.
5353 		 */
5354 		sinfo->sinfo_flags &= 0x00ff;
5355 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5356 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5357 		}
5358 	}
5359 #ifdef SCTP_ASOCLOG_OF_TSNS
5360 	{
5361 		int index, newindex;
5362 		struct sctp_pcbtsn_rlog *entry;
5363 
5364 		do {
5365 			index = inp->readlog_index;
5366 			newindex = index + 1;
5367 			if (newindex >= SCTP_READ_LOG_SIZE) {
5368 				newindex = 0;
5369 			}
5370 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5371 		entry = &inp->readlog[index];
5372 		entry->vtag = control->sinfo_assoc_id;
5373 		entry->strm = control->sinfo_stream;
5374 		entry->seq = control->sinfo_ssn;
5375 		entry->sz = control->length;
5376 		entry->flgs = control->sinfo_flags;
5377 	}
5378 #endif
5379 	if (fromlen && from) {
5380 		struct sockaddr *to;
5381 
5382 #ifdef INET
5383 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5384 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5385 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5386 #else
5387 		/* No AF_INET use AF_INET6 */
5388 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5389 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5390 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5391 #endif
5392 
5393 		to = from;
5394 #if defined(INET) && defined(INET6)
5395 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
5396 		    (to->sa_family == AF_INET) &&
5397 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5398 			struct sockaddr_in *sin;
5399 			struct sockaddr_in6 sin6;
5400 
5401 			sin = (struct sockaddr_in *)to;
5402 			bzero(&sin6, sizeof(sin6));
5403 			sin6.sin6_family = AF_INET6;
5404 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5405 			sin6.sin6_addr.s6_addr16[2] = 0xffff;
5406 			bcopy(&sin->sin_addr,
5407 			    &sin6.sin6_addr.s6_addr16[3],
5408 			    sizeof(sin6.sin6_addr.s6_addr16[3]));
5409 			sin6.sin6_port = sin->sin_port;
5410 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5411 		}
5412 #endif
5413 #if defined(INET6)
5414 		{
5415 			struct sockaddr_in6 lsa6, *to6;
5416 
5417 			to6 = (struct sockaddr_in6 *)to;
5418 			sctp_recover_scope_mac(to6, (&lsa6));
5419 		}
5420 #endif
5421 	}
5422 	/* now copy out what data we can */
5423 	if (mp == NULL) {
5424 		/* copy out each mbuf in the chain up to length */
5425 get_more_data:
5426 		m = control->data;
5427 		while (m) {
5428 			/* Move out all we can */
5429 			cp_len = (int)uio->uio_resid;
5430 			my_len = (int)SCTP_BUF_LEN(m);
5431 			if (cp_len > my_len) {
5432 				/* not enough in this buf */
5433 				cp_len = my_len;
5434 			}
5435 			if (hold_rlock) {
5436 				SCTP_INP_READ_UNLOCK(inp);
5437 				hold_rlock = 0;
5438 			}
5439 			if (cp_len > 0)
5440 				error = uiomove(mtod(m, char *), cp_len, uio);
5441 			/* re-read */
5442 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5443 				goto release;
5444 			}
5445 			if ((control->do_not_ref_stcb == 0) && stcb &&
5446 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5447 				no_rcv_needed = 1;
5448 			}
5449 			if (error) {
5450 				/* error we are out of here */
5451 				goto release;
5452 			}
5453 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5454 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5455 			    ((control->end_added == 0) ||
5456 			    (control->end_added &&
5457 			    (TAILQ_NEXT(control, next) == NULL)))
5458 			    ) {
5459 				SCTP_INP_READ_LOCK(inp);
5460 				hold_rlock = 1;
5461 			}
5462 			if (cp_len == SCTP_BUF_LEN(m)) {
5463 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5464 				    (control->end_added)) {
5465 					out_flags |= MSG_EOR;
5466 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5467 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5468 				}
5469 				if (control->spec_flags & M_NOTIFICATION) {
5470 					out_flags |= MSG_NOTIFICATION;
5471 				}
5472 				/* we ate up the mbuf */
5473 				if (in_flags & MSG_PEEK) {
5474 					/* just looking */
5475 					m = SCTP_BUF_NEXT(m);
5476 					copied_so_far += cp_len;
5477 				} else {
5478 					/* dispose of the mbuf */
5479 					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5480 						sctp_sblog(&so->so_rcv,
5481 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5482 					}
5483 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5484 					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5485 						sctp_sblog(&so->so_rcv,
5486 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5487 					}
5488 					embuf = m;
5489 					copied_so_far += cp_len;
5490 					freed_so_far += cp_len;
5491 					freed_so_far += MSIZE;
5492 					atomic_subtract_int(&control->length, cp_len);
5493 					control->data = sctp_m_free(m);
5494 					m = control->data;
5495 					/*
5496 					 * been through it all, must hold sb
5497 					 * lock ok to null tail
5498 					 */
5499 					if (control->data == NULL) {
5500 #ifdef INVARIANTS
5501 						if ((control->end_added == 0) ||
5502 						    (TAILQ_NEXT(control, next) == NULL)) {
5503 							/*
5504 							 * If the end is not
5505 							 * added, OR the
5506 							 * next is NOT null
5507 							 * we MUST have the
5508 							 * lock.
5509 							 */
5510 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5511 								panic("Hmm we don't own the lock?");
5512 							}
5513 						}
5514 #endif
5515 						control->tail_mbuf = NULL;
5516 #ifdef INVARIANTS
5517 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5518 							panic("end_added, nothing left and no MSG_EOR");
5519 						}
5520 #endif
5521 					}
5522 				}
5523 			} else {
5524 				/* Do we need to trim the mbuf? */
5525 				if (control->spec_flags & M_NOTIFICATION) {
5526 					out_flags |= MSG_NOTIFICATION;
5527 				}
5528 				if ((in_flags & MSG_PEEK) == 0) {
5529 					SCTP_BUF_RESV_UF(m, cp_len);
5530 					SCTP_BUF_LEN(m) -= cp_len;
5531 					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5532 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5533 					}
5534 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5535 					if ((control->do_not_ref_stcb == 0) &&
5536 					    stcb) {
5537 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5538 					}
5539 					copied_so_far += cp_len;
5540 					embuf = m;
5541 					freed_so_far += cp_len;
5542 					freed_so_far += MSIZE;
5543 					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5544 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5545 						    SCTP_LOG_SBRESULT, 0);
5546 					}
5547 					atomic_subtract_int(&control->length, cp_len);
5548 				} else {
5549 					copied_so_far += cp_len;
5550 				}
5551 			}
5552 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5553 				break;
5554 			}
5555 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5556 			    (control->do_not_ref_stcb == 0) &&
5557 			    (freed_so_far >= rwnd_req)) {
5558 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5559 			}
5560 		}		/* end while(m) */
5561 		/*
5562 		 * At this point we have looked at it all and we either have
5563 		 * a MSG_EOR/or read all the user wants... <OR>
5564 		 * control->length == 0.
5565 		 */
5566 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5567 			/* we are done with this control */
5568 			if (control->length == 0) {
5569 				if (control->data) {
5570 #ifdef INVARIANTS
5571 					panic("control->data not null at read eor?");
5572 #else
5573 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5574 					sctp_m_freem(control->data);
5575 					control->data = NULL;
5576 #endif
5577 				}
5578 		done_with_control:
5579 				if (TAILQ_NEXT(control, next) == NULL) {
5580 					/*
5581 					 * If we don't have a next we need a
5582 					 * lock, if there is a next
5583 					 * interrupt is filling ahead of us
5584 					 * and we don't need a lock to
5585 					 * remove this guy (which is the
5586 					 * head of the queue).
5587 					 */
5588 					if (hold_rlock == 0) {
5589 						SCTP_INP_READ_LOCK(inp);
5590 						hold_rlock = 1;
5591 					}
5592 				}
5593 				TAILQ_REMOVE(&inp->read_queue, control, next);
5594 				/* Add back any hiddend data */
5595 				if (control->held_length) {
5596 					held_length = 0;
5597 					control->held_length = 0;
5598 					wakeup_read_socket = 1;
5599 				}
5600 				if (control->aux_data) {
5601 					sctp_m_free(control->aux_data);
5602 					control->aux_data = NULL;
5603 				}
5604 				no_rcv_needed = control->do_not_ref_stcb;
5605 				sctp_free_remote_addr(control->whoFrom);
5606 				control->data = NULL;
5607 				sctp_free_a_readq(stcb, control);
5608 				control = NULL;
5609 				if ((freed_so_far >= rwnd_req) &&
5610 				    (no_rcv_needed == 0))
5611 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5612 
5613 			} else {
5614 				/*
5615 				 * The user did not read all of this
5616 				 * message, turn off the returned MSG_EOR
5617 				 * since we are leaving more behind on the
5618 				 * control to read.
5619 				 */
5620 #ifdef INVARIANTS
5621 				if (control->end_added &&
5622 				    (control->data == NULL) &&
5623 				    (control->tail_mbuf == NULL)) {
5624 					panic("Gak, control->length is corrupt?");
5625 				}
5626 #endif
5627 				no_rcv_needed = control->do_not_ref_stcb;
5628 				out_flags &= ~MSG_EOR;
5629 			}
5630 		}
5631 		if (out_flags & MSG_EOR) {
5632 			goto release;
5633 		}
5634 		if ((uio->uio_resid == 0) ||
5635 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5636 		    ) {
5637 			goto release;
5638 		}
5639 		/*
5640 		 * If I hit here the receiver wants more and this message is
5641 		 * NOT done (pd-api). So two questions. Can we block? if not
5642 		 * we are done. Did the user NOT set MSG_WAITALL?
5643 		 */
5644 		if (block_allowed == 0) {
5645 			goto release;
5646 		}
5647 		/*
5648 		 * We need to wait for more data a few things: - We don't
5649 		 * sbunlock() so we don't get someone else reading. - We
5650 		 * must be sure to account for the case where what is added
5651 		 * is NOT to our control when we wakeup.
5652 		 */
5653 
5654 		/*
5655 		 * Do we need to tell the transport a rwnd update might be
5656 		 * needed before we go to sleep?
5657 		 */
5658 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5659 		    ((freed_so_far >= rwnd_req) &&
5660 		    (control->do_not_ref_stcb == 0) &&
5661 		    (no_rcv_needed == 0))) {
5662 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5663 		}
5664 wait_some_more:
5665 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5666 			goto release;
5667 		}
5668 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5669 			goto release;
5670 
5671 		if (hold_rlock == 1) {
5672 			SCTP_INP_READ_UNLOCK(inp);
5673 			hold_rlock = 0;
5674 		}
5675 		if (hold_sblock == 0) {
5676 			SOCKBUF_LOCK(&so->so_rcv);
5677 			hold_sblock = 1;
5678 		}
5679 		if ((copied_so_far) && (control->length == 0) &&
5680 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
5681 		    ) {
5682 			goto release;
5683 		}
5684 		if (so->so_rcv.sb_cc <= control->held_length) {
5685 			error = sbwait(&so->so_rcv);
5686 			if (error) {
5687 				goto release;
5688 			}
5689 			control->held_length = 0;
5690 		}
5691 		if (hold_sblock) {
5692 			SOCKBUF_UNLOCK(&so->so_rcv);
5693 			hold_sblock = 0;
5694 		}
5695 		if (control->length == 0) {
5696 			/* still nothing here */
5697 			if (control->end_added == 1) {
5698 				/* he aborted, or is done i.e.did a shutdown */
5699 				out_flags |= MSG_EOR;
5700 				if (control->pdapi_aborted) {
5701 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5702 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5703 
5704 					out_flags |= MSG_TRUNC;
5705 				} else {
5706 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5707 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5708 				}
5709 				goto done_with_control;
5710 			}
5711 			if (so->so_rcv.sb_cc > held_length) {
5712 				control->held_length = so->so_rcv.sb_cc;
5713 				held_length = 0;
5714 			}
5715 			goto wait_some_more;
5716 		} else if (control->data == NULL) {
5717 			/*
5718 			 * we must re-sync since data is probably being
5719 			 * added
5720 			 */
5721 			SCTP_INP_READ_LOCK(inp);
5722 			if ((control->length > 0) && (control->data == NULL)) {
5723 				/*
5724 				 * big trouble.. we have the lock and its
5725 				 * corrupt?
5726 				 */
5727 				panic("Impossible data==NULL length !=0");
5728 			}
5729 			SCTP_INP_READ_UNLOCK(inp);
5730 			/* We will fall around to get more data */
5731 		}
5732 		goto get_more_data;
5733 	} else {
5734 		/*-
5735 		 * Give caller back the mbuf chain,
5736 		 * store in uio_resid the length
5737 		 */
5738 		wakeup_read_socket = 0;
5739 		if ((control->end_added == 0) ||
5740 		    (TAILQ_NEXT(control, next) == NULL)) {
5741 			/* Need to get rlock */
5742 			if (hold_rlock == 0) {
5743 				SCTP_INP_READ_LOCK(inp);
5744 				hold_rlock = 1;
5745 			}
5746 		}
5747 		if (control->end_added) {
5748 			out_flags |= MSG_EOR;
5749 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5750 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5751 		}
5752 		if (control->spec_flags & M_NOTIFICATION) {
5753 			out_flags |= MSG_NOTIFICATION;
5754 		}
5755 		uio->uio_resid = control->length;
5756 		*mp = control->data;
5757 		m = control->data;
5758 		while (m) {
5759 			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5760 				sctp_sblog(&so->so_rcv,
5761 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5762 			}
5763 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5764 			freed_so_far += SCTP_BUF_LEN(m);
5765 			freed_so_far += MSIZE;
5766 			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5767 				sctp_sblog(&so->so_rcv,
5768 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5769 			}
5770 			m = SCTP_BUF_NEXT(m);
5771 		}
5772 		control->data = control->tail_mbuf = NULL;
5773 		control->length = 0;
5774 		if (out_flags & MSG_EOR) {
5775 			/* Done with this control */
5776 			goto done_with_control;
5777 		}
5778 	}
5779 release:
5780 	if (hold_rlock == 1) {
5781 		SCTP_INP_READ_UNLOCK(inp);
5782 		hold_rlock = 0;
5783 	}
5784 	if (hold_sblock == 1) {
5785 		SOCKBUF_UNLOCK(&so->so_rcv);
5786 		hold_sblock = 0;
5787 	}
5788 	sbunlock(&so->so_rcv);
5789 	sockbuf_lock = 0;
5790 
5791 release_unlocked:
5792 	if (hold_sblock) {
5793 		SOCKBUF_UNLOCK(&so->so_rcv);
5794 		hold_sblock = 0;
5795 	}
5796 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5797 		if ((freed_so_far >= rwnd_req) &&
5798 		    (control && (control->do_not_ref_stcb == 0)) &&
5799 		    (no_rcv_needed == 0))
5800 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5801 	}
5802 	if (msg_flags)
5803 		*msg_flags |= out_flags;
5804 out:
5805 	if (((out_flags & MSG_EOR) == 0) &&
5806 	    ((in_flags & MSG_PEEK) == 0) &&
5807 	    (sinfo) &&
5808 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
5809 		struct sctp_extrcvinfo *s_extra;
5810 
5811 		s_extra = (struct sctp_extrcvinfo *)sinfo;
5812 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5813 	}
5814 	if (hold_rlock == 1) {
5815 		SCTP_INP_READ_UNLOCK(inp);
5816 		hold_rlock = 0;
5817 	}
5818 	if (hold_sblock) {
5819 		SOCKBUF_UNLOCK(&so->so_rcv);
5820 		hold_sblock = 0;
5821 	}
5822 	if (sockbuf_lock) {
5823 		sbunlock(&so->so_rcv);
5824 	}
5825 	if (freecnt_applied) {
5826 		/*
5827 		 * The lock on the socket buffer protects us so the free
5828 		 * code will stop. But since we used the socketbuf lock and
5829 		 * the sender uses the tcb_lock to increment, we need to use
5830 		 * the atomic add to the refcnt.
5831 		 */
5832 		if (stcb == NULL) {
5833 			panic("stcb for refcnt has gone NULL?");
5834 		}
5835 		atomic_add_int(&stcb->asoc.refcnt, -1);
5836 		freecnt_applied = 0;
5837 		/* Save the value back for next time */
5838 		stcb->freed_by_sorcv_sincelast = freed_so_far;
5839 	}
5840 	if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
5841 		if (stcb) {
5842 			sctp_misc_ints(SCTP_SORECV_DONE,
5843 			    freed_so_far,
5844 			    ((uio) ? (slen - uio->uio_resid) : slen),
5845 			    stcb->asoc.my_rwnd,
5846 			    so->so_rcv.sb_cc);
5847 		} else {
5848 			sctp_misc_ints(SCTP_SORECV_DONE,
5849 			    freed_so_far,
5850 			    ((uio) ? (slen - uio->uio_resid) : slen),
5851 			    0,
5852 			    so->so_rcv.sb_cc);
5853 		}
5854 	}
5855 	if (wakeup_read_socket) {
5856 		sctp_sorwakeup(inp, so);
5857 	}
5858 	return (error);
5859 }
5860 
5861 
5862 #ifdef SCTP_MBUF_LOGGING
5863 struct mbuf *
5864 sctp_m_free(struct mbuf *m)
5865 {
5866 	if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
5867 		if (SCTP_BUF_IS_EXTENDED(m)) {
5868 			sctp_log_mb(m, SCTP_MBUF_IFREE);
5869 		}
5870 	}
5871 	return (m_free(m));
5872 }
5873 
5874 void
5875 sctp_m_freem(struct mbuf *mb)
5876 {
5877 	while (mb != NULL)
5878 		mb = sctp_m_free(mb);
5879 }
5880 
5881 #endif
5882 
/*
 * Request a peer-set-primary for every association that holds the
 * given local address.
 *
 * The sockaddr is resolved to its sctp_ifa and a work item carrying
 * the SCTP_SET_PRIM_ADDR action is queued on the address work queue;
 * the ADDR_WQ timer is started so the iterator processes it.
 *
 * Returns 0 on success, EADDRNOTAVAIL if 'sa' is not a local
 * interface address in 'vrf_id', or ENOMEM if the work item cannot
 * be allocated.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		/* not one of our local addresses */
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* bump the laddr count and initialize the work item */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/*
	 * The work item holds a reference on the ifa — presumably
	 * released by the iterator once the action is processed
	 * (NOTE(review): verify against the ADDR_WQ consumer).
	 */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_IPI_ITERATOR_WQ_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&sctppcbinfo.addr_wq, wi, sctp_nxt_addr);
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
	return (0);
}
5929 
5930 
5931 
5932 
5933 int
5934 sctp_soreceive(struct socket *so,
5935     struct sockaddr **psa,
5936     struct uio *uio,
5937     struct mbuf **mp0,
5938     struct mbuf **controlp,
5939     int *flagsp)
5940 {
5941 	int error, fromlen;
5942 	uint8_t sockbuf[256];
5943 	struct sockaddr *from;
5944 	struct sctp_extrcvinfo sinfo;
5945 	int filling_sinfo = 1;
5946 	struct sctp_inpcb *inp;
5947 
5948 	inp = (struct sctp_inpcb *)so->so_pcb;
5949 	/* pickup the assoc we are reading from */
5950 	if (inp == NULL) {
5951 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5952 		return (EINVAL);
5953 	}
5954 	if ((sctp_is_feature_off(inp,
5955 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
5956 	    (controlp == NULL)) {
5957 		/* user does not want the sndrcv ctl */
5958 		filling_sinfo = 0;
5959 	}
5960 	if (psa) {
5961 		from = (struct sockaddr *)sockbuf;
5962 		fromlen = sizeof(sockbuf);
5963 		from->sa_len = 0;
5964 	} else {
5965 		from = NULL;
5966 		fromlen = 0;
5967 	}
5968 
5969 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
5970 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
5971 	if ((controlp) && (filling_sinfo)) {
5972 		/* copy back the sinfo in a CMSG format */
5973 		if (filling_sinfo)
5974 			*controlp = sctp_build_ctl_nchunk(inp,
5975 			    (struct sctp_sndrcvinfo *)&sinfo);
5976 		else
5977 			*controlp = NULL;
5978 	}
5979 	if (psa) {
5980 		/* copy back the address info */
5981 		if (from && from->sa_len) {
5982 			*psa = sodupsockaddr(from, M_NOWAIT);
5983 		} else {
5984 			*psa = NULL;
5985 		}
5986 	}
5987 	return (error);
5988 }
5989 
5990 
5991 int
5992 sctp_l_soreceive(struct socket *so,
5993     struct sockaddr **name,
5994     struct uio *uio,
5995     char **controlp,
5996     int *controllen,
5997     int *flag)
5998 {
5999 	int error, fromlen;
6000 	uint8_t sockbuf[256];
6001 	struct sockaddr *from;
6002 	struct sctp_extrcvinfo sinfo;
6003 	int filling_sinfo = 1;
6004 	struct sctp_inpcb *inp;
6005 
6006 	inp = (struct sctp_inpcb *)so->so_pcb;
6007 	/* pickup the assoc we are reading from */
6008 	if (inp == NULL) {
6009 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6010 		return (EINVAL);
6011 	}
6012 	if ((sctp_is_feature_off(inp,
6013 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6014 	    (controlp == NULL)) {
6015 		/* user does not want the sndrcv ctl */
6016 		filling_sinfo = 0;
6017 	}
6018 	if (name) {
6019 		from = (struct sockaddr *)sockbuf;
6020 		fromlen = sizeof(sockbuf);
6021 		from->sa_len = 0;
6022 	} else {
6023 		from = NULL;
6024 		fromlen = 0;
6025 	}
6026 
6027 	error = sctp_sorecvmsg(so, uio,
6028 	    (struct mbuf **)NULL,
6029 	    from, fromlen, flag,
6030 	    (struct sctp_sndrcvinfo *)&sinfo,
6031 	    filling_sinfo);
6032 	if ((controlp) && (filling_sinfo)) {
6033 		/*
6034 		 * copy back the sinfo in a CMSG format note that the caller
6035 		 * has reponsibility for freeing the memory.
6036 		 */
6037 		if (filling_sinfo)
6038 			*controlp = sctp_build_ctl_cchunk(inp,
6039 			    controllen,
6040 			    (struct sctp_sndrcvinfo *)&sinfo);
6041 	}
6042 	if (name) {
6043 		/* copy back the address info */
6044 		if (from && from->sa_len) {
6045 			*name = sodupsockaddr(from, M_WAIT);
6046 		} else {
6047 			*name = NULL;
6048 		}
6049 	}
6050 	return (error);
6051 }
6052 
6053 
6054 
6055 
6056 
6057 
6058 
/*
 * connectx() helper: add 'totaddr' packed sockaddrs starting at
 * 'addr' to the association as confirmed remote addresses.
 *
 * Returns the number of addresses successfully added.  If
 * sctp_add_remote_addr() fails, the association is freed via
 * sctp_free_assoc(), *error is set to ENOBUFS, and the caller must
 * not touch the stcb again.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/*
		 * NOTE(review): an address of any other family leaves
		 * 'incr' at its previous value (0 on the first pass), so
		 * 'sa' does not advance and the same bytes are re-read on
		 * the next iteration — confirm callers pre-validate the
		 * list (e.g. via sctp_connectx_helper_find()).
		 */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6099 
6100 struct sctp_tcb *
6101 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6102     int *totaddr, int *num_v4, int *num_v6, int *error,
6103     int limit, int *bad_addr)
6104 {
6105 	struct sockaddr *sa;
6106 	struct sctp_tcb *stcb = NULL;
6107 	size_t incr, at, i;
6108 
6109 	at = incr = 0;
6110 	sa = addr;
6111 	*error = *num_v6 = *num_v4 = 0;
6112 	/* account and validate addresses */
6113 	for (i = 0; i < (size_t)*totaddr; i++) {
6114 		if (sa->sa_family == AF_INET) {
6115 			(*num_v4) += 1;
6116 			incr = sizeof(struct sockaddr_in);
6117 			if (sa->sa_len != incr) {
6118 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6119 				*error = EINVAL;
6120 				*bad_addr = 1;
6121 				return (NULL);
6122 			}
6123 		} else if (sa->sa_family == AF_INET6) {
6124 			struct sockaddr_in6 *sin6;
6125 
6126 			sin6 = (struct sockaddr_in6 *)sa;
6127 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6128 				/* Must be non-mapped for connectx */
6129 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6130 				*error = EINVAL;
6131 				*bad_addr = 1;
6132 				return (NULL);
6133 			}
6134 			(*num_v6) += 1;
6135 			incr = sizeof(struct sockaddr_in6);
6136 			if (sa->sa_len != incr) {
6137 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6138 				*error = EINVAL;
6139 				*bad_addr = 1;
6140 				return (NULL);
6141 			}
6142 		} else {
6143 			*totaddr = i;
6144 			/* we are done */
6145 			break;
6146 		}
6147 		SCTP_INP_INCR_REF(inp);
6148 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6149 		if (stcb != NULL) {
6150 			/* Already have or am bring up an association */
6151 			return (stcb);
6152 		} else {
6153 			SCTP_INP_DECR_REF(inp);
6154 		}
6155 		if ((at + incr) > (size_t)limit) {
6156 			*totaddr = i;
6157 			break;
6158 		}
6159 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6160 	}
6161 	return ((struct sctp_tcb *)NULL);
6162 }
6163 
6164 /*
6165  * sctp_bindx(ADD) for one address.
6166  * assumes all arguments are valid/checked by caller.
6167  */
6168 void
6169 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6170     struct sockaddr *sa, sctp_assoc_t assoc_id,
6171     uint32_t vrf_id, int *error, void *p)
6172 {
6173 	struct sockaddr *addr_touse;
6174 	struct sockaddr_in sin;
6175 
6176 	/* see if we're bound all already! */
6177 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6178 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6179 		*error = EINVAL;
6180 		return;
6181 	}
6182 	addr_touse = sa;
6183 #if defined(INET6)
6184 	if (sa->sa_family == AF_INET6) {
6185 		struct sockaddr_in6 *sin6;
6186 
6187 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6188 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6189 			*error = EINVAL;
6190 			return;
6191 		}
6192 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6193 			/* can only bind v6 on PF_INET6 sockets */
6194 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6195 			*error = EINVAL;
6196 			return;
6197 		}
6198 		sin6 = (struct sockaddr_in6 *)addr_touse;
6199 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6200 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6201 			    SCTP_IPV6_V6ONLY(inp)) {
6202 				/* can't bind v4-mapped on PF_INET sockets */
6203 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6204 				*error = EINVAL;
6205 				return;
6206 			}
6207 			in6_sin6_2_sin(&sin, sin6);
6208 			addr_touse = (struct sockaddr *)&sin;
6209 		}
6210 	}
6211 #endif
6212 	if (sa->sa_family == AF_INET) {
6213 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6214 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6215 			*error = EINVAL;
6216 			return;
6217 		}
6218 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6219 		    SCTP_IPV6_V6ONLY(inp)) {
6220 			/* can't bind v4 on PF_INET sockets */
6221 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6222 			*error = EINVAL;
6223 			return;
6224 		}
6225 	}
6226 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6227 		if (p == NULL) {
6228 			/* Can't get proc for Net/Open BSD */
6229 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6230 			*error = EINVAL;
6231 			return;
6232 		}
6233 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6234 		return;
6235 	}
6236 	/*
6237 	 * No locks required here since bind and mgmt_ep_sa all do their own
6238 	 * locking. If we do something for the FIX: below we may need to
6239 	 * lock in that case.
6240 	 */
6241 	if (assoc_id == 0) {
6242 		/* add the address */
6243 		struct sctp_inpcb *lep;
6244 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6245 
6246 		/* validate the incoming port */
6247 		if ((lsin->sin_port != 0) &&
6248 		    (lsin->sin_port != inp->sctp_lport)) {
6249 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6250 			*error = EINVAL;
6251 			return;
6252 		} else {
6253 			/* user specified 0 port, set it to existing port */
6254 			lsin->sin_port = inp->sctp_lport;
6255 		}
6256 
6257 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6258 		if (lep != NULL) {
6259 			/*
6260 			 * We must decrement the refcount since we have the
6261 			 * ep already and are binding. No remove going on
6262 			 * here.
6263 			 */
6264 			SCTP_INP_DECR_REF(inp);
6265 		}
6266 		if (lep == inp) {
6267 			/* already bound to it.. ok */
6268 			return;
6269 		} else if (lep == NULL) {
6270 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6271 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6272 			    SCTP_ADD_IP_ADDRESS,
6273 			    vrf_id, NULL);
6274 		} else {
6275 			*error = EADDRINUSE;
6276 		}
6277 		if (*error)
6278 			return;
6279 	} else {
6280 		/*
6281 		 * FIX: decide whether we allow assoc based bindx
6282 		 */
6283 	}
6284 }
6285 
6286 /*
6287  * sctp_bindx(DELETE) for one address.
6288  * assumes all arguments are valid/checked by caller.
6289  */
/*
 * sctp_bindx(DELETE) for one address.
 * assumes all arguments are valid/checked by caller.
 *
 * Mirrors sctp_bindx_add_address(): validates the address against
 * the endpoint's family/V6ONLY state (converting a v4-mapped v6
 * address to plain v4 where allowed) and then removes it from the
 * endpoint's address list.  Errors come back through *error.
 */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;
	struct sockaddr_in sin;

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6)
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* delete using the plain v4 form of the address */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6364 
6365 /*
6366  * returns the valid local address count for an assoc, taking into account
6367  * all scoping rules
6368  */
/*
 * Count the local addresses usable by 'stcb', honoring the
 * association's loopback/ipv4-local/link-local/site scopes and the
 * endpoint's address-family restrictions.  Returns the count (0 if
 * the VRF cannot be found).
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/* which families may this endpoint use at all? */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			/* v6 socket without V6ONLY also speaks v4 */
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				/* loopback excluded from this assoc */
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;

				if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
				    (ipv4_addr_legal)) {
					struct sockaddr_in *sin;

					sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
					if (sin->sin_addr.s_addr == 0) {
						/* skip unspecified addrs */
						continue;
					}
					if ((ipv4_local_scope == 0) &&
					    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
						/* RFC1918 space disallowed */
						continue;
					}
					/* count this one */
					count++;
				} else if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
				    (ipv6_addr_legal)) {
					struct sockaddr_in6 *sin6;

					sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
					if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
						continue;
					}
					if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
						if (local_scope == 0)
							continue;
						if (sin6->sin6_scope_id == 0) {
							if (sa6_recoverscope(sin6) != 0)
								/*
								 * bad link
								 * local
								 * address
								 */
								continue;
						}
					}
					if ((site_scope == 0) &&
					    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
						continue;
					}
					/* count this one */
					count++;
				}
			}
		}
	} else {
		/*
		 * subset bound case: only the endpoint's explicitly
		 * bound addresses are candidates
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6477 
6478 #if defined(SCTP_LOCAL_TRACE_BUF)
6479 
/*
 * Append one entry to the in-kernel SCTP trace ring buffer.
 *
 * A compare-and-swap loop on sctp_log.index reserves a distinct slot
 * per concurrent caller without taking any lock.  The stored index
 * runs 1..SCTP_MAX_LOGGING_SIZE and wraps back to 1; a saturated
 * pre-CAS value therefore maps to entry slot 0 below.
 *
 * 'str' is unused here (SCTP_UNUSED); the six uint32_t parameters
 * are recorded verbatim along with a cycle-count timestamp.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* reserve a slot: retry until our index advance wins the CAS */
	do {
		saveindex = sctp_log.index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&sctp_log.index, saveindex, newindex) == 0);
	/* wrap: a saturated index claims entry 0 */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	sctp_log.entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	sctp_log.entry[saveindex].subsys = subsys;
	sctp_log.entry[saveindex].params[0] = a;
	sctp_log.entry[saveindex].params[1] = b;
	sctp_log.entry[saveindex].params[2] = c;
	sctp_log.entry[saveindex].params[3] = d;
	sctp_log.entry[saveindex].params[4] = e;
	sctp_log.entry[saveindex].params[5] = f;
}
6505 
6506 #endif
6507