xref: /freebsd/sys/netinet/sctputil.c (revision 35a04710d7286aa9538917fd7f8e417dbee95b82)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_crc32.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_cc_functions.h>
53 
54 #define NUMBER_OF_MTU_SIZES 18
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 void
62 sctp_sblog(struct sockbuf *sb,
63     struct sctp_tcb *stcb, int from, int incr)
64 {
65 	struct sctp_cwnd_log sctp_clog;
66 
67 	sctp_clog.x.sb.stcb = stcb;
68 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
69 	if (stcb)
70 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
71 	else
72 		sctp_clog.x.sb.stcb_sbcc = 0;
73 	sctp_clog.x.sb.incr = incr;
74 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
75 	    SCTP_LOG_EVENT_SB,
76 	    from,
77 	    sctp_clog.x.misc.log1,
78 	    sctp_clog.x.misc.log2,
79 	    sctp_clog.x.misc.log3,
80 	    sctp_clog.x.misc.log4);
81 }
82 
83 void
84 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
85 {
86 	struct sctp_cwnd_log sctp_clog;
87 
88 	sctp_clog.x.close.inp = (void *)inp;
89 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
90 	if (stcb) {
91 		sctp_clog.x.close.stcb = (void *)stcb;
92 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
93 	} else {
94 		sctp_clog.x.close.stcb = 0;
95 		sctp_clog.x.close.state = 0;
96 	}
97 	sctp_clog.x.close.loc = loc;
98 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
99 	    SCTP_LOG_EVENT_CLOSE,
100 	    0,
101 	    sctp_clog.x.misc.log1,
102 	    sctp_clog.x.misc.log2,
103 	    sctp_clog.x.misc.log3,
104 	    sctp_clog.x.misc.log4);
105 }
106 
107 
108 void
109 rto_logging(struct sctp_nets *net, int from)
110 {
111 	struct sctp_cwnd_log sctp_clog;
112 
113 	sctp_clog.x.rto.net = (void *)net;
114 	sctp_clog.x.rto.rtt = net->prev_rtt;
115 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
116 	    SCTP_LOG_EVENT_RTT,
117 	    from,
118 	    sctp_clog.x.misc.log1,
119 	    sctp_clog.x.misc.log2,
120 	    sctp_clog.x.misc.log3,
121 	    sctp_clog.x.misc.log4);
122 
123 }
124 
125 void
126 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
127 {
128 	struct sctp_cwnd_log sctp_clog;
129 
130 	sctp_clog.x.strlog.stcb = stcb;
131 	sctp_clog.x.strlog.n_tsn = tsn;
132 	sctp_clog.x.strlog.n_sseq = sseq;
133 	sctp_clog.x.strlog.e_tsn = 0;
134 	sctp_clog.x.strlog.e_sseq = 0;
135 	sctp_clog.x.strlog.strm = stream;
136 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
137 	    SCTP_LOG_EVENT_STRM,
138 	    from,
139 	    sctp_clog.x.misc.log1,
140 	    sctp_clog.x.misc.log2,
141 	    sctp_clog.x.misc.log3,
142 	    sctp_clog.x.misc.log4);
143 
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 
166 void
167 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
168 {
169 	struct sctp_cwnd_log sctp_clog;
170 
171 	sctp_clog.x.sack.cumack = cumack;
172 	sctp_clog.x.sack.oldcumack = old_cumack;
173 	sctp_clog.x.sack.tsn = tsn;
174 	sctp_clog.x.sack.numGaps = gaps;
175 	sctp_clog.x.sack.numDups = dups;
176 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
177 	    SCTP_LOG_EVENT_SACK,
178 	    from,
179 	    sctp_clog.x.misc.log1,
180 	    sctp_clog.x.misc.log2,
181 	    sctp_clog.x.misc.log3,
182 	    sctp_clog.x.misc.log4);
183 }
184 
185 void
186 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
187 {
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
204     int from)
205 {
206 	struct sctp_cwnd_log sctp_clog;
207 
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 
219 }
220 
221 
222 void
223 sctp_log_mb(struct mbuf *m, int from)
224 {
225 	struct sctp_cwnd_log sctp_clog;
226 
227 	sctp_clog.x.mb.mp = m;
228 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
229 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
230 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
231 	if (SCTP_BUF_IS_EXTENDED(m)) {
232 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
233 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
234 	} else {
235 		sctp_clog.x.mb.ext = 0;
236 		sctp_clog.x.mb.refcnt = 0;
237 	}
238 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
239 	    SCTP_LOG_EVENT_MBUF,
240 	    from,
241 	    sctp_clog.x.misc.log1,
242 	    sctp_clog.x.misc.log2,
243 	    sctp_clog.x.misc.log3,
244 	    sctp_clog.x.misc.log4);
245 }
246 
247 
248 void
249 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
250     int from)
251 {
252 	struct sctp_cwnd_log sctp_clog;
253 
254 	if (control == NULL) {
255 		SCTP_PRINTF("Gak log of NULL?\n");
256 		return;
257 	}
258 	sctp_clog.x.strlog.stcb = control->stcb;
259 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
260 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
261 	sctp_clog.x.strlog.strm = control->sinfo_stream;
262 	if (poschk != NULL) {
263 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
264 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
265 	} else {
266 		sctp_clog.x.strlog.e_tsn = 0;
267 		sctp_clog.x.strlog.e_sseq = 0;
268 	}
269 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
270 	    SCTP_LOG_EVENT_STRM,
271 	    from,
272 	    sctp_clog.x.misc.log1,
273 	    sctp_clog.x.misc.log2,
274 	    sctp_clog.x.misc.log3,
275 	    sctp_clog.x.misc.log4);
276 
277 }
278 
279 void
280 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
281 {
282 	struct sctp_cwnd_log sctp_clog;
283 
284 	sctp_clog.x.cwnd.net = net;
285 	if (stcb->asoc.send_queue_cnt > 255)
286 		sctp_clog.x.cwnd.cnt_in_send = 255;
287 	else
288 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
289 	if (stcb->asoc.stream_queue_cnt > 255)
290 		sctp_clog.x.cwnd.cnt_in_str = 255;
291 	else
292 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
293 
294 	if (net) {
295 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
296 		sctp_clog.x.cwnd.inflight = net->flight_size;
297 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
298 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
299 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
300 	}
301 	if (SCTP_CWNDLOG_PRESEND == from) {
302 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
303 	}
304 	sctp_clog.x.cwnd.cwnd_augment = augment;
305 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
306 	    SCTP_LOG_EVENT_CWND,
307 	    from,
308 	    sctp_clog.x.misc.log1,
309 	    sctp_clog.x.misc.log2,
310 	    sctp_clog.x.misc.log3,
311 	    sctp_clog.x.misc.log4);
312 
313 }
314 
315 void
316 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
317 {
318 	struct sctp_cwnd_log sctp_clog;
319 
320 	if (inp) {
321 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
322 
323 	} else {
324 		sctp_clog.x.lock.sock = (void *)NULL;
325 	}
326 	sctp_clog.x.lock.inp = (void *)inp;
327 	if (stcb) {
328 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
329 	} else {
330 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
331 	}
332 	if (inp) {
333 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
334 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
335 	} else {
336 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
337 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
338 	}
339 	sctp_clog.x.lock.info_lock = rw_wowned(&sctppcbinfo.ipi_ep_mtx);
340 	if (inp->sctp_socket) {
341 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
342 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
343 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
344 	} else {
345 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
346 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
347 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
348 	}
349 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
350 	    SCTP_LOG_LOCK_EVENT,
351 	    from,
352 	    sctp_clog.x.misc.log1,
353 	    sctp_clog.x.misc.log2,
354 	    sctp_clog.x.misc.log3,
355 	    sctp_clog.x.misc.log4);
356 
357 }
358 
359 void
360 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
361 {
362 	struct sctp_cwnd_log sctp_clog;
363 
364 	sctp_clog.x.cwnd.net = net;
365 	sctp_clog.x.cwnd.cwnd_new_value = error;
366 	sctp_clog.x.cwnd.inflight = net->flight_size;
367 	sctp_clog.x.cwnd.cwnd_augment = burst;
368 	if (stcb->asoc.send_queue_cnt > 255)
369 		sctp_clog.x.cwnd.cnt_in_send = 255;
370 	else
371 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
372 	if (stcb->asoc.stream_queue_cnt > 255)
373 		sctp_clog.x.cwnd.cnt_in_str = 255;
374 	else
375 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
376 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
377 	    SCTP_LOG_EVENT_MAXBURST,
378 	    from,
379 	    sctp_clog.x.misc.log1,
380 	    sctp_clog.x.misc.log2,
381 	    sctp_clog.x.misc.log3,
382 	    sctp_clog.x.misc.log4);
383 
384 }
385 
386 void
387 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
388 {
389 	struct sctp_cwnd_log sctp_clog;
390 
391 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
392 	sctp_clog.x.rwnd.send_size = snd_size;
393 	sctp_clog.x.rwnd.overhead = overhead;
394 	sctp_clog.x.rwnd.new_rwnd = 0;
395 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
396 	    SCTP_LOG_EVENT_RWND,
397 	    from,
398 	    sctp_clog.x.misc.log1,
399 	    sctp_clog.x.misc.log2,
400 	    sctp_clog.x.misc.log3,
401 	    sctp_clog.x.misc.log4);
402 }
403 
404 void
405 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
406 {
407 	struct sctp_cwnd_log sctp_clog;
408 
409 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
410 	sctp_clog.x.rwnd.send_size = flight_size;
411 	sctp_clog.x.rwnd.overhead = overhead;
412 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
413 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
414 	    SCTP_LOG_EVENT_RWND,
415 	    from,
416 	    sctp_clog.x.misc.log1,
417 	    sctp_clog.x.misc.log2,
418 	    sctp_clog.x.misc.log3,
419 	    sctp_clog.x.misc.log4);
420 }
421 
422 void
423 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
424 {
425 	struct sctp_cwnd_log sctp_clog;
426 
427 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
428 	sctp_clog.x.mbcnt.size_change = book;
429 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
430 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
431 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
432 	    SCTP_LOG_EVENT_MBCNT,
433 	    from,
434 	    sctp_clog.x.misc.log1,
435 	    sctp_clog.x.misc.log2,
436 	    sctp_clog.x.misc.log3,
437 	    sctp_clog.x.misc.log4);
438 
439 }
440 
441 void
442 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
443 {
444 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
445 	    SCTP_LOG_MISC_EVENT,
446 	    from,
447 	    a, b, c, d);
448 }
449 
450 void
451 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
452 {
453 	struct sctp_cwnd_log sctp_clog;
454 
455 	sctp_clog.x.wake.stcb = (void *)stcb;
456 	sctp_clog.x.wake.wake_cnt = wake_cnt;
457 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
458 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
459 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
460 
461 	if (stcb->asoc.stream_queue_cnt < 0xff)
462 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
463 	else
464 		sctp_clog.x.wake.stream_qcnt = 0xff;
465 
466 	if (stcb->asoc.chunks_on_out_queue < 0xff)
467 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
468 	else
469 		sctp_clog.x.wake.chunks_on_oque = 0xff;
470 
471 	sctp_clog.x.wake.sctpflags = 0;
472 	/* set in the defered mode stuff */
473 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
474 		sctp_clog.x.wake.sctpflags |= 1;
475 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
476 		sctp_clog.x.wake.sctpflags |= 2;
477 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
478 		sctp_clog.x.wake.sctpflags |= 4;
479 	/* what about the sb */
480 	if (stcb->sctp_socket) {
481 		struct socket *so = stcb->sctp_socket;
482 
483 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
484 	} else {
485 		sctp_clog.x.wake.sbflags = 0xff;
486 	}
487 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
488 	    SCTP_LOG_EVENT_WAKE,
489 	    from,
490 	    sctp_clog.x.misc.log1,
491 	    sctp_clog.x.misc.log2,
492 	    sctp_clog.x.misc.log3,
493 	    sctp_clog.x.misc.log4);
494 
495 }
496 
497 void
498 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
499 {
500 	struct sctp_cwnd_log sctp_clog;
501 
502 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
503 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
504 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
505 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
506 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
507 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
508 	sctp_clog.x.blk.sndlen = sendlen;
509 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
510 	    SCTP_LOG_EVENT_BLOCK,
511 	    from,
512 	    sctp_clog.x.misc.log1,
513 	    sctp_clog.x.misc.log2,
514 	    sctp_clog.x.misc.log3,
515 	    sctp_clog.x.misc.log4);
516 
517 }
518 
/*
 * Placeholder for copying the statistics/cwnd log out to user space.
 * With the KTR-based logging above there is nothing to copy here (the
 * existing comment suggests records are retrieved via ktrdump instead);
 * optval/optsize are currently ignored and 0 (success) is returned
 * unconditionally.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
525 
526 #ifdef SCTP_AUDITING_ENABLED
527 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
528 static int sctp_audit_indx = 0;
529 
530 static
531 void
532 sctp_print_audit_report(void)
533 {
534 	int i;
535 	int cnt;
536 
537 	cnt = 0;
538 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
539 		if ((sctp_audit_data[i][0] == 0xe0) &&
540 		    (sctp_audit_data[i][1] == 0x01)) {
541 			cnt = 0;
542 			SCTP_PRINTF("\n");
543 		} else if (sctp_audit_data[i][0] == 0xf0) {
544 			cnt = 0;
545 			SCTP_PRINTF("\n");
546 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
547 		    (sctp_audit_data[i][1] == 0x01)) {
548 			SCTP_PRINTF("\n");
549 			cnt = 0;
550 		}
551 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
552 		    (uint32_t) sctp_audit_data[i][1]);
553 		cnt++;
554 		if ((cnt % 14) == 0)
555 			SCTP_PRINTF("\n");
556 	}
557 	for (i = 0; i < sctp_audit_indx; i++) {
558 		if ((sctp_audit_data[i][0] == 0xe0) &&
559 		    (sctp_audit_data[i][1] == 0x01)) {
560 			cnt = 0;
561 			SCTP_PRINTF("\n");
562 		} else if (sctp_audit_data[i][0] == 0xf0) {
563 			cnt = 0;
564 			SCTP_PRINTF("\n");
565 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
566 		    (sctp_audit_data[i][1] == 0x01)) {
567 			SCTP_PRINTF("\n");
568 			cnt = 0;
569 		}
570 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
571 		    (uint32_t) sctp_audit_data[i][1]);
572 		cnt++;
573 		if ((cnt % 14) == 0)
574 			SCTP_PRINTF("\n");
575 	}
576 	SCTP_PRINTF("\n");
577 }
578 
579 void
580 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
581     struct sctp_nets *net)
582 {
583 	int resend_cnt, tot_out, rep, tot_book_cnt;
584 	struct sctp_nets *lnet;
585 	struct sctp_tmit_chunk *chk;
586 
587 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
588 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
589 	sctp_audit_indx++;
590 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
591 		sctp_audit_indx = 0;
592 	}
593 	if (inp == NULL) {
594 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
595 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
596 		sctp_audit_indx++;
597 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
598 			sctp_audit_indx = 0;
599 		}
600 		return;
601 	}
602 	if (stcb == NULL) {
603 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
604 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
605 		sctp_audit_indx++;
606 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
607 			sctp_audit_indx = 0;
608 		}
609 		return;
610 	}
611 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
612 	sctp_audit_data[sctp_audit_indx][1] =
613 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
614 	sctp_audit_indx++;
615 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
616 		sctp_audit_indx = 0;
617 	}
618 	rep = 0;
619 	tot_book_cnt = 0;
620 	resend_cnt = tot_out = 0;
621 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
622 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
623 			resend_cnt++;
624 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
625 			tot_out += chk->book_size;
626 			tot_book_cnt++;
627 		}
628 	}
629 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
630 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
631 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
632 		sctp_audit_indx++;
633 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
634 			sctp_audit_indx = 0;
635 		}
636 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
637 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
638 		rep = 1;
639 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
640 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
641 		sctp_audit_data[sctp_audit_indx][1] =
642 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
643 		sctp_audit_indx++;
644 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
645 			sctp_audit_indx = 0;
646 		}
647 	}
648 	if (tot_out != stcb->asoc.total_flight) {
649 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
650 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
651 		sctp_audit_indx++;
652 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
653 			sctp_audit_indx = 0;
654 		}
655 		rep = 1;
656 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
657 		    (int)stcb->asoc.total_flight);
658 		stcb->asoc.total_flight = tot_out;
659 	}
660 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
661 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
662 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
663 		sctp_audit_indx++;
664 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
665 			sctp_audit_indx = 0;
666 		}
667 		rep = 1;
668 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
669 
670 		stcb->asoc.total_flight_count = tot_book_cnt;
671 	}
672 	tot_out = 0;
673 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
674 		tot_out += lnet->flight_size;
675 	}
676 	if (tot_out != stcb->asoc.total_flight) {
677 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
678 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
679 		sctp_audit_indx++;
680 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
681 			sctp_audit_indx = 0;
682 		}
683 		rep = 1;
684 		SCTP_PRINTF("real flight:%d net total was %d\n",
685 		    stcb->asoc.total_flight, tot_out);
686 		/* now corrective action */
687 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
688 
689 			tot_out = 0;
690 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
691 				if ((chk->whoTo == lnet) &&
692 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
693 					tot_out += chk->book_size;
694 				}
695 			}
696 			if (lnet->flight_size != tot_out) {
697 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
698 				    (uint32_t) lnet, lnet->flight_size,
699 				    tot_out);
700 				lnet->flight_size = tot_out;
701 			}
702 		}
703 	}
704 	if (rep) {
705 		sctp_print_audit_report();
706 	}
707 }
708 
709 void
710 sctp_audit_log(uint8_t ev, uint8_t fd)
711 {
712 
713 	sctp_audit_data[sctp_audit_indx][0] = ev;
714 	sctp_audit_data[sctp_audit_indx][1] = fd;
715 	sctp_audit_indx++;
716 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
717 		sctp_audit_indx = 0;
718 	}
719 }
720 
721 #endif
722 
/*
 * a list of sizes based on typical mtu's, used only if next hop size not
 * returned.  Entries must remain in ascending order: find_next_best_mtu()
 * scans for the first entry larger than the failed size and steps back
 * one slot, which only works on a sorted table.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
747 
748 void
749 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
750 {
751 	struct sctp_association *asoc;
752 	struct sctp_nets *net;
753 
754 	asoc = &stcb->asoc;
755 
756 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
757 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
758 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
759 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
760 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
761 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
762 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
763 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
764 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
765 	}
766 }
767 
768 int
769 find_next_best_mtu(int totsz)
770 {
771 	int i, perfer;
772 
773 	/*
774 	 * if we are in here we must find the next best fit based on the
775 	 * size of the dg that failed to be sent.
776 	 */
777 	perfer = 0;
778 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
779 		if (totsz < sctp_mtu_sizes[i]) {
780 			perfer = i - 1;
781 			if (perfer < 0)
782 				perfer = 0;
783 			break;
784 		}
785 	}
786 	return (sctp_mtu_sizes[perfer]);
787 }
788 
/*
 * Refill the endpoint's random_store by hashing its random_numbers
 * seed together with a monotonically increasing counter, and rewind
 * store_at so consumers start drawing from the front again.  The
 * counter is bumped only after the hash, so each refill produces a
 * different store.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
807 
/*
 * Draw a 32-bit value from the endpoint's pre-computed random store.
 * A slot is claimed lock-free with atomic_cmpset on store_at (retrying
 * on contention), and the store is refilled whenever the claim wraps
 * back to offset zero.  If initial_sequence_debug is non-zero the
 * endpoint is in debug mode and deterministic, incrementing values are
 * handed out instead.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		/* debug mode: deterministic, incrementing values */
		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* wrap before running off the usable end of the store */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* claim [store_at, store_at+4) atomically; retry if we lost the race */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
845 
846 uint32_t
847 sctp_select_a_tag(struct sctp_inpcb *inp, int save_in_twait)
848 {
849 	u_long x, not_done;
850 	struct timeval now;
851 
852 	(void)SCTP_GETTIME_TIMEVAL(&now);
853 	not_done = 1;
854 	while (not_done) {
855 		x = sctp_select_initial_TSN(&inp->sctp_ep);
856 		if (x == 0) {
857 			/* we never use 0 */
858 			continue;
859 		}
860 		if (sctp_is_vtag_good(inp, x, &now, save_in_twait)) {
861 			not_done = 0;
862 		}
863 	}
864 	return (x);
865 }
866 
867 int
868 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
869     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
870 {
871 	struct sctp_association *asoc;
872 
873 	/*
874 	 * Anything set to zero is taken care of by the allocation routine's
875 	 * bzero
876 	 */
877 
878 	/*
879 	 * Up front select what scoping to apply on addresses I tell my peer
880 	 * Not sure what to do with these right now, we will need to come up
881 	 * with a way to set them. We may need to pass them through from the
882 	 * caller in the sctp_aloc_assoc() function.
883 	 */
884 	int i;
885 
886 	asoc = &stcb->asoc;
887 	/* init all variables to a known value. */
888 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
889 	asoc->max_burst = m->sctp_ep.max_burst;
890 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
891 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
892 	asoc->sctp_cmt_on_off = (uint8_t) sctp_cmt_on_off;
893 	/* JRS 5/21/07 - Init CMT PF variables */
894 	asoc->sctp_cmt_pf = (uint8_t) sctp_cmt_pf;
895 	asoc->sctp_frag_point = m->sctp_frag_point;
896 #ifdef INET
897 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
898 #else
899 	asoc->default_tos = 0;
900 #endif
901 
902 #ifdef INET6
903 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
904 #else
905 	asoc->default_flowlabel = 0;
906 #endif
907 	if (override_tag) {
908 		struct timeval now;
909 
910 		(void)SCTP_GETTIME_TIMEVAL(&now);
911 		if (sctp_is_in_timewait(override_tag)) {
912 			/*
913 			 * It must be in the time-wait hash, we put it there
914 			 * when we aloc one. If not the peer is playing
915 			 * games.
916 			 */
917 			asoc->my_vtag = override_tag;
918 		} else {
919 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
920 			panic("Huh is_in_timewait fails");
921 			return (ENOMEM);
922 		}
923 
924 	} else {
925 		asoc->my_vtag = sctp_select_a_tag(m, 1);
926 	}
927 	/* Get the nonce tags */
928 	asoc->my_vtag_nonce = sctp_select_a_tag(m, 0);
929 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, 0);
930 	asoc->vrf_id = vrf_id;
931 
932 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
933 		asoc->hb_is_disabled = 1;
934 	else
935 		asoc->hb_is_disabled = 0;
936 
937 #ifdef SCTP_ASOCLOG_OF_TSNS
938 	asoc->tsn_in_at = 0;
939 	asoc->tsn_out_at = 0;
940 	asoc->tsn_in_wrapped = 0;
941 	asoc->tsn_out_wrapped = 0;
942 	asoc->cumack_log_at = 0;
943 	asoc->cumack_log_atsnt = 0;
944 #endif
945 #ifdef SCTP_FS_SPEC_LOG
946 	asoc->fs_index = 0;
947 #endif
948 	asoc->refcnt = 0;
949 	asoc->assoc_up_sent = 0;
950 	asoc->assoc_id = asoc->my_vtag;
951 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
952 	    sctp_select_initial_TSN(&m->sctp_ep);
953 	/* we are optimisitic here */
954 	asoc->peer_supports_pktdrop = 1;
955 
956 	asoc->sent_queue_retran_cnt = 0;
957 
958 	/* for CMT */
959 	asoc->last_net_data_came_from = NULL;
960 
961 	/* This will need to be adjusted */
962 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
963 	asoc->last_acked_seq = asoc->init_seq_number - 1;
964 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
965 	asoc->asconf_seq_in = asoc->last_acked_seq;
966 
967 	/* here we are different, we hold the next one we expect */
968 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
969 
970 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
971 	asoc->initial_rto = m->sctp_ep.initial_rto;
972 
973 	asoc->max_init_times = m->sctp_ep.max_init_times;
974 	asoc->max_send_times = m->sctp_ep.max_send_times;
975 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
976 	asoc->free_chunk_cnt = 0;
977 
978 	asoc->iam_blocking = 0;
979 	/* ECN Nonce initialization */
980 	asoc->context = m->sctp_context;
981 	asoc->def_send = m->def_send;
982 	asoc->ecn_nonce_allowed = 0;
983 	asoc->receiver_nonce_sum = 1;
984 	asoc->nonce_sum_expect_base = 1;
985 	asoc->nonce_sum_check = 1;
986 	asoc->nonce_resync_tsn = 0;
987 	asoc->nonce_wait_for_ecne = 0;
988 	asoc->nonce_wait_tsn = 0;
989 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
990 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
991 	asoc->pr_sctp_cnt = 0;
992 	asoc->total_output_queue_size = 0;
993 
994 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
995 		struct in6pcb *inp6;
996 
997 		/* Its a V6 socket */
998 		inp6 = (struct in6pcb *)m;
999 		asoc->ipv6_addr_legal = 1;
1000 		/* Now look at the binding flag to see if V4 will be legal */
1001 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1002 			asoc->ipv4_addr_legal = 1;
1003 		} else {
1004 			/* V4 addresses are NOT legal on the association */
1005 			asoc->ipv4_addr_legal = 0;
1006 		}
1007 	} else {
1008 		/* Its a V4 socket, no - V6 */
1009 		asoc->ipv4_addr_legal = 1;
1010 		asoc->ipv6_addr_legal = 0;
1011 	}
1012 
1013 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1014 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1015 
1016 	asoc->smallest_mtu = m->sctp_frag_point;
1017 #ifdef SCTP_PRINT_FOR_B_AND_M
1018 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1019 	    asoc->smallest_mtu);
1020 #endif
1021 	asoc->minrto = m->sctp_ep.sctp_minrto;
1022 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1023 
1024 	asoc->locked_on_sending = NULL;
1025 	asoc->stream_locked_on = 0;
1026 	asoc->ecn_echo_cnt_onq = 0;
1027 	asoc->stream_locked = 0;
1028 
1029 	asoc->send_sack = 1;
1030 
1031 	LIST_INIT(&asoc->sctp_restricted_addrs);
1032 
1033 	TAILQ_INIT(&asoc->nets);
1034 	TAILQ_INIT(&asoc->pending_reply_queue);
1035 	TAILQ_INIT(&asoc->asconf_ack_sent);
1036 	/* Setup to fill the hb random cache at first HB */
1037 	asoc->hb_random_idx = 4;
1038 
1039 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1040 
1041 	/*
1042 	 * JRS - Pick the default congestion control module based on the
1043 	 * sysctl.
1044 	 */
1045 	switch (m->sctp_ep.sctp_default_cc_module) {
1046 		/* JRS - Standard TCP congestion control */
1047 	case SCTP_CC_RFC2581:
1048 		{
1049 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1050 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1051 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1052 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1053 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1054 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1055 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1056 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1057 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1058 			break;
1059 		}
1060 		/* JRS - High Speed TCP congestion control (Floyd) */
1061 	case SCTP_CC_HSTCP:
1062 		{
1063 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1064 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1065 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1066 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1067 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1068 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1069 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1070 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1071 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1072 			break;
1073 		}
1074 		/* JRS - HTCP congestion control */
1075 	case SCTP_CC_HTCP:
1076 		{
1077 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1078 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1079 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1080 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1081 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1082 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1083 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1084 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1085 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1086 			break;
1087 		}
1088 		/* JRS - By default, use RFC2581 */
1089 	default:
1090 		{
1091 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1092 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1093 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1094 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1095 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1096 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1097 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1098 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1099 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1100 			break;
1101 		}
1102 	}
1103 
1104 	/*
1105 	 * Now the stream parameters, here we allocate space for all streams
1106 	 * that we request by default.
1107 	 */
1108 	asoc->streamoutcnt = asoc->pre_open_streams =
1109 	    m->sctp_ep.pre_open_stream_count;
1110 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1111 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1112 	    SCTP_M_STRMO);
1113 	if (asoc->strmout == NULL) {
1114 		/* big trouble no memory */
1115 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1116 		return (ENOMEM);
1117 	}
1118 	for (i = 0; i < asoc->streamoutcnt; i++) {
1119 		/*
1120 		 * inbound side must be set to 0xffff, also NOTE when we get
1121 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1122 		 * count (streamoutcnt) but first check if we sent to any of
1123 		 * the upper streams that were dropped (if some were). Those
1124 		 * that were dropped must be notified to the upper layer as
1125 		 * failed to send.
1126 		 */
1127 		asoc->strmout[i].next_sequence_sent = 0x0;
1128 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1129 		asoc->strmout[i].stream_no = i;
1130 		asoc->strmout[i].last_msg_incomplete = 0;
1131 		asoc->strmout[i].next_spoke.tqe_next = 0;
1132 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1133 	}
1134 	/* Now the mapping array */
1135 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1136 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1137 	    SCTP_M_MAP);
1138 	if (asoc->mapping_array == NULL) {
1139 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1140 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1141 		return (ENOMEM);
1142 	}
1143 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1144 	/* Now the init of the other outqueues */
1145 	TAILQ_INIT(&asoc->free_chunks);
1146 	TAILQ_INIT(&asoc->out_wheel);
1147 	TAILQ_INIT(&asoc->control_send_queue);
1148 	TAILQ_INIT(&asoc->send_queue);
1149 	TAILQ_INIT(&asoc->sent_queue);
1150 	TAILQ_INIT(&asoc->reasmqueue);
1151 	TAILQ_INIT(&asoc->resetHead);
1152 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1153 	TAILQ_INIT(&asoc->asconf_queue);
1154 	/* authentication fields */
1155 	asoc->authinfo.random = NULL;
1156 	asoc->authinfo.assoc_key = NULL;
1157 	asoc->authinfo.assoc_keyid = 0;
1158 	asoc->authinfo.recv_key = NULL;
1159 	asoc->authinfo.recv_keyid = 0;
1160 	LIST_INIT(&asoc->shared_keys);
1161 	asoc->marked_retrans = 0;
1162 	asoc->timoinit = 0;
1163 	asoc->timodata = 0;
1164 	asoc->timosack = 0;
1165 	asoc->timoshutdown = 0;
1166 	asoc->timoheartbeat = 0;
1167 	asoc->timocookie = 0;
1168 	asoc->timoshutdownack = 0;
1169 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1170 	asoc->discontinuity_time = asoc->start_time;
1171 	/*
1172 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1173 	 * freed later whe the association is freed.
1174 	 */
1175 	return (0);
1176 }
1177 
1178 int
1179 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1180 {
1181 	/* mapping array needs to grow */
1182 	uint8_t *new_array;
1183 	uint32_t new_size;
1184 
1185 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1186 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1187 	if (new_array == NULL) {
1188 		/* can't get more, forget it */
1189 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1190 		    new_size);
1191 		return (-1);
1192 	}
1193 	memset(new_array, 0, new_size);
1194 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1195 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1196 	asoc->mapping_array = new_array;
1197 	asoc->mapping_array_size = new_size;
1198 	return (0);
1199 }
1200 
1201 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Core of the thread-based PCB iterator: walk endpoints (and the
 * associations on each endpoint) that match the iterator's flag/feature
 * and state filters, invoking the caller-supplied callbacks along the
 * way.  On completion the end-callback is invoked and the iterator
 * structure itself is freed — the caller must not touch "it" afterwards.
 *
 * Locking: entered without locks; takes the global ITERATOR lock for
 * the duration (briefly dropped to let others in after
 * SCTP_ITERATOR_MAX_AT_ONCE associations), and acquires per-inp and
 * per-tcb locks as it goes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	/* drop the hold that was placed on the current inp when queued */
	if (it->inp) {
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* iterator owns itself; free it now that we are done */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/* skip endpoints that don't match the requested flags/features */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/*
	 * NOTE(review): WUNLOCK immediately followed by RLOCK downgrades
	 * to a read hold for the assoc walk; there is a window between the
	 * two — presumably safe under the ITERATOR lock, but confirm.
	 */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* per-inp callback asked us to skip, or no assocs here */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* refcnt pins the tcb while all locks are dropped */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	/* NOTE(review): empty WLOCK/WUNLOCK pair — acts as a barrier? verify */
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1328 
1329 void
1330 sctp_iterator_worker(void)
1331 {
1332 	struct sctp_iterator *it = NULL;
1333 
1334 	/* This function is called with the WQ lock in place */
1335 
1336 	sctppcbinfo.iterator_running = 1;
1337 again:
1338 	it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
1339 	while (it) {
1340 		/* now lets work on this one */
1341 		TAILQ_REMOVE(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
1342 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1343 		sctp_iterator_work(it);
1344 		SCTP_IPI_ITERATOR_WQ_LOCK();
1345 		/* sa_ignore FREED_MEMORY */
1346 		it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
1347 	}
1348 	if (TAILQ_FIRST(&sctppcbinfo.iteratorhead)) {
1349 		goto again;
1350 	}
1351 	sctppcbinfo.iterator_running = 0;
1352 	return;
1353 }
1354 
1355 #endif
1356 
1357 
1358 static void
1359 sctp_handle_addr_wq(void)
1360 {
1361 	/* deal with the ADDR wq from the rtsock calls */
1362 	struct sctp_laddr *wi;
1363 	struct sctp_asconf_iterator *asc;
1364 
1365 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1366 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1367 	if (asc == NULL) {
1368 		/* Try later, no memory */
1369 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1370 		    (struct sctp_inpcb *)NULL,
1371 		    (struct sctp_tcb *)NULL,
1372 		    (struct sctp_nets *)NULL);
1373 		return;
1374 	}
1375 	LIST_INIT(&asc->list_of_work);
1376 	asc->cnt = 0;
1377 	SCTP_IPI_ITERATOR_WQ_LOCK();
1378 	wi = LIST_FIRST(&sctppcbinfo.addr_wq);
1379 	while (wi != NULL) {
1380 		LIST_REMOVE(wi, sctp_nxt_addr);
1381 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1382 		asc->cnt++;
1383 		wi = LIST_FIRST(&sctppcbinfo.addr_wq);
1384 	}
1385 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1386 	if (asc->cnt == 0) {
1387 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1388 	} else {
1389 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1390 		    sctp_asconf_iterator_stcb,
1391 		    NULL,	/* No ep end for boundall */
1392 		    SCTP_PCB_FLAGS_BOUNDALL,
1393 		    SCTP_PCB_ANY_FEATURES,
1394 		    SCTP_ASOC_ANY_STATE,
1395 		    (void *)asc, 0,
1396 		    sctp_asconf_iterator_end, NULL, 0);
1397 	}
1398 }
1399 
/*
 * NOTE(review): file-scope, externally visible scratch variables that
 * sctp_timeout_handler() writes on every T3 timeout.  Concurrent timer
 * expirations would race on them; nothing in this file reads them after
 * the handler returns.  They look like debug leftovers — consider making
 * them locals of sctp_timeout_handler() (verify no other compilation
 * unit references them first).
 */
int retcode = 0;
int cur_oerr = 0;
1402 
1403 void
1404 sctp_timeout_handler(void *t)
1405 {
1406 	struct sctp_inpcb *inp;
1407 	struct sctp_tcb *stcb;
1408 	struct sctp_nets *net;
1409 	struct sctp_timer *tmr;
1410 
1411 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1412 	struct socket *so;
1413 
1414 #endif
1415 	int did_output;
1416 	struct sctp_iterator *it = NULL;
1417 
1418 	tmr = (struct sctp_timer *)t;
1419 	inp = (struct sctp_inpcb *)tmr->ep;
1420 	stcb = (struct sctp_tcb *)tmr->tcb;
1421 	net = (struct sctp_nets *)tmr->net;
1422 	did_output = 1;
1423 
1424 #ifdef SCTP_AUDITING_ENABLED
1425 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1426 	sctp_auditing(3, inp, stcb, net);
1427 #endif
1428 
1429 	/* sanity checks... */
1430 	if (tmr->self != (void *)tmr) {
1431 		/*
1432 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1433 		 * tmr);
1434 		 */
1435 		return;
1436 	}
1437 	tmr->stopped_from = 0xa001;
1438 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1439 		/*
1440 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1441 		 * tmr->type);
1442 		 */
1443 		return;
1444 	}
1445 	tmr->stopped_from = 0xa002;
1446 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1447 		return;
1448 	}
1449 	/* if this is an iterator timeout, get the struct and clear inp */
1450 	tmr->stopped_from = 0xa003;
1451 	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
1452 		it = (struct sctp_iterator *)inp;
1453 		inp = NULL;
1454 	}
1455 	if (inp) {
1456 		SCTP_INP_INCR_REF(inp);
1457 		if ((inp->sctp_socket == 0) &&
1458 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1459 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1460 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1461 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1462 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1463 		    ) {
1464 			SCTP_INP_DECR_REF(inp);
1465 			return;
1466 		}
1467 	}
1468 	tmr->stopped_from = 0xa004;
1469 	if (stcb) {
1470 		atomic_add_int(&stcb->asoc.refcnt, 1);
1471 		if (stcb->asoc.state == 0) {
1472 			atomic_add_int(&stcb->asoc.refcnt, -1);
1473 			if (inp) {
1474 				SCTP_INP_DECR_REF(inp);
1475 			}
1476 			return;
1477 		}
1478 	}
1479 	tmr->stopped_from = 0xa005;
1480 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
1481 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1482 		if (inp) {
1483 			SCTP_INP_DECR_REF(inp);
1484 		}
1485 		if (stcb) {
1486 			atomic_add_int(&stcb->asoc.refcnt, -1);
1487 		}
1488 		return;
1489 	}
1490 	tmr->stopped_from = 0xa006;
1491 
1492 	if (stcb) {
1493 		SCTP_TCB_LOCK(stcb);
1494 		atomic_add_int(&stcb->asoc.refcnt, -1);
1495 		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
1496 		    ((stcb->asoc.state == 0) ||
1497 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1498 			SCTP_TCB_UNLOCK(stcb);
1499 			if (inp) {
1500 				SCTP_INP_DECR_REF(inp);
1501 			}
1502 			return;
1503 		}
1504 	}
1505 	/* record in stopped what t-o occured */
1506 	tmr->stopped_from = tmr->type;
1507 
1508 	/* mark as being serviced now */
1509 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1510 		/*
1511 		 * Callout has been rescheduled.
1512 		 */
1513 		goto get_out;
1514 	}
1515 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1516 		/*
1517 		 * Not active, so no action.
1518 		 */
1519 		goto get_out;
1520 	}
1521 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1522 
1523 	/* call the handler for the appropriate timer type */
1524 	switch (tmr->type) {
1525 	case SCTP_TIMER_TYPE_ZERO_COPY:
1526 		if (inp == NULL) {
1527 			break;
1528 		}
1529 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1530 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1531 		}
1532 		break;
1533 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1534 		if (inp == NULL) {
1535 			break;
1536 		}
1537 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1538 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1539 		}
1540 		break;
1541 	case SCTP_TIMER_TYPE_ADDR_WQ:
1542 		sctp_handle_addr_wq();
1543 		break;
1544 	case SCTP_TIMER_TYPE_ITERATOR:
1545 		SCTP_STAT_INCR(sctps_timoiterator);
1546 		sctp_iterator_timer(it);
1547 		break;
1548 	case SCTP_TIMER_TYPE_SEND:
1549 		if ((stcb == NULL) || (inp == NULL)) {
1550 			break;
1551 		}
1552 		SCTP_STAT_INCR(sctps_timodata);
1553 		stcb->asoc.timodata++;
1554 		stcb->asoc.num_send_timers_up--;
1555 		if (stcb->asoc.num_send_timers_up < 0) {
1556 			stcb->asoc.num_send_timers_up = 0;
1557 		}
1558 		SCTP_TCB_LOCK_ASSERT(stcb);
1559 		cur_oerr = stcb->asoc.overall_error_count;
1560 		retcode = sctp_t3rxt_timer(inp, stcb, net);
1561 		if (retcode) {
1562 			/* no need to unlock on tcb its gone */
1563 
1564 			goto out_decr;
1565 		}
1566 		SCTP_TCB_LOCK_ASSERT(stcb);
1567 #ifdef SCTP_AUDITING_ENABLED
1568 		sctp_auditing(4, inp, stcb, net);
1569 #endif
1570 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1571 		if ((stcb->asoc.num_send_timers_up == 0) &&
1572 		    (stcb->asoc.sent_queue_cnt > 0)
1573 		    ) {
1574 			struct sctp_tmit_chunk *chk;
1575 
1576 			/*
1577 			 * safeguard. If there on some on the sent queue
1578 			 * somewhere but no timers running something is
1579 			 * wrong... so we start a timer on the first chunk
1580 			 * on the send queue on whatever net it is sent to.
1581 			 */
1582 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1583 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1584 			    chk->whoTo);
1585 		}
1586 		break;
1587 	case SCTP_TIMER_TYPE_INIT:
1588 		if ((stcb == NULL) || (inp == NULL)) {
1589 			break;
1590 		}
1591 		SCTP_STAT_INCR(sctps_timoinit);
1592 		stcb->asoc.timoinit++;
1593 		if (sctp_t1init_timer(inp, stcb, net)) {
1594 			/* no need to unlock on tcb its gone */
1595 			goto out_decr;
1596 		}
1597 		/* We do output but not here */
1598 		did_output = 0;
1599 		break;
1600 	case SCTP_TIMER_TYPE_RECV:
1601 		if ((stcb == NULL) || (inp == NULL)) {
1602 			break;
1603 		} {
1604 			int abort_flag;
1605 
1606 			SCTP_STAT_INCR(sctps_timosack);
1607 			stcb->asoc.timosack++;
1608 			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
1609 				sctp_sack_check(stcb, 0, 0, &abort_flag);
1610 			sctp_send_sack(stcb);
1611 		}
1612 #ifdef SCTP_AUDITING_ENABLED
1613 		sctp_auditing(4, inp, stcb, net);
1614 #endif
1615 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1616 		break;
1617 	case SCTP_TIMER_TYPE_SHUTDOWN:
1618 		if ((stcb == NULL) || (inp == NULL)) {
1619 			break;
1620 		}
1621 		if (sctp_shutdown_timer(inp, stcb, net)) {
1622 			/* no need to unlock on tcb its gone */
1623 			goto out_decr;
1624 		}
1625 		SCTP_STAT_INCR(sctps_timoshutdown);
1626 		stcb->asoc.timoshutdown++;
1627 #ifdef SCTP_AUDITING_ENABLED
1628 		sctp_auditing(4, inp, stcb, net);
1629 #endif
1630 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1631 		break;
1632 	case SCTP_TIMER_TYPE_HEARTBEAT:
1633 		{
1634 			struct sctp_nets *lnet;
1635 			int cnt_of_unconf = 0;
1636 
1637 			if ((stcb == NULL) || (inp == NULL)) {
1638 				break;
1639 			}
1640 			SCTP_STAT_INCR(sctps_timoheartbeat);
1641 			stcb->asoc.timoheartbeat++;
1642 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1643 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1644 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1645 					cnt_of_unconf++;
1646 				}
1647 			}
1648 			if (cnt_of_unconf == 0) {
1649 				if (sctp_heartbeat_timer(inp, stcb, lnet,
1650 				    cnt_of_unconf)) {
1651 					/* no need to unlock on tcb its gone */
1652 					goto out_decr;
1653 				}
1654 			}
1655 #ifdef SCTP_AUDITING_ENABLED
1656 			sctp_auditing(4, inp, stcb, lnet);
1657 #endif
1658 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
1659 			    stcb->sctp_ep, stcb, lnet);
1660 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1661 		}
1662 		break;
1663 	case SCTP_TIMER_TYPE_COOKIE:
1664 		if ((stcb == NULL) || (inp == NULL)) {
1665 			break;
1666 		}
1667 		if (sctp_cookie_timer(inp, stcb, net)) {
1668 			/* no need to unlock on tcb its gone */
1669 			goto out_decr;
1670 		}
1671 		SCTP_STAT_INCR(sctps_timocookie);
1672 		stcb->asoc.timocookie++;
1673 #ifdef SCTP_AUDITING_ENABLED
1674 		sctp_auditing(4, inp, stcb, net);
1675 #endif
1676 		/*
1677 		 * We consider T3 and Cookie timer pretty much the same with
1678 		 * respect to where from in chunk_output.
1679 		 */
1680 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1681 		break;
1682 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1683 		{
1684 			struct timeval tv;
1685 			int i, secret;
1686 
1687 			if (inp == NULL) {
1688 				break;
1689 			}
1690 			SCTP_STAT_INCR(sctps_timosecret);
1691 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1692 			SCTP_INP_WLOCK(inp);
1693 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1694 			inp->sctp_ep.last_secret_number =
1695 			    inp->sctp_ep.current_secret_number;
1696 			inp->sctp_ep.current_secret_number++;
1697 			if (inp->sctp_ep.current_secret_number >=
1698 			    SCTP_HOW_MANY_SECRETS) {
1699 				inp->sctp_ep.current_secret_number = 0;
1700 			}
1701 			secret = (int)inp->sctp_ep.current_secret_number;
1702 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1703 				inp->sctp_ep.secret_key[secret][i] =
1704 				    sctp_select_initial_TSN(&inp->sctp_ep);
1705 			}
1706 			SCTP_INP_WUNLOCK(inp);
1707 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1708 		}
1709 		did_output = 0;
1710 		break;
1711 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1712 		if ((stcb == NULL) || (inp == NULL)) {
1713 			break;
1714 		}
1715 		SCTP_STAT_INCR(sctps_timopathmtu);
1716 		sctp_pathmtu_timer(inp, stcb, net);
1717 		did_output = 0;
1718 		break;
1719 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1720 		if ((stcb == NULL) || (inp == NULL)) {
1721 			break;
1722 		}
1723 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1724 			/* no need to unlock on tcb its gone */
1725 			goto out_decr;
1726 		}
1727 		SCTP_STAT_INCR(sctps_timoshutdownack);
1728 		stcb->asoc.timoshutdownack++;
1729 #ifdef SCTP_AUDITING_ENABLED
1730 		sctp_auditing(4, inp, stcb, net);
1731 #endif
1732 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1733 		break;
1734 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1735 		if ((stcb == NULL) || (inp == NULL)) {
1736 			break;
1737 		}
1738 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1739 		sctp_abort_an_association(inp, stcb,
1740 		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
1741 		/* no need to unlock on tcb its gone */
1742 		goto out_decr;
1743 
1744 	case SCTP_TIMER_TYPE_STRRESET:
1745 		if ((stcb == NULL) || (inp == NULL)) {
1746 			break;
1747 		}
1748 		if (sctp_strreset_timer(inp, stcb, net)) {
1749 			/* no need to unlock on tcb its gone */
1750 			goto out_decr;
1751 		}
1752 		SCTP_STAT_INCR(sctps_timostrmrst);
1753 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1754 		break;
1755 	case SCTP_TIMER_TYPE_EARLYFR:
1756 		/* Need to do FR of things for net */
1757 		if ((stcb == NULL) || (inp == NULL)) {
1758 			break;
1759 		}
1760 		SCTP_STAT_INCR(sctps_timoearlyfr);
1761 		sctp_early_fr_timer(inp, stcb, net);
1762 		break;
1763 	case SCTP_TIMER_TYPE_ASCONF:
1764 		if ((stcb == NULL) || (inp == NULL)) {
1765 			break;
1766 		}
1767 		if (sctp_asconf_timer(inp, stcb, net)) {
1768 			/* no need to unlock on tcb its gone */
1769 			goto out_decr;
1770 		}
1771 		SCTP_STAT_INCR(sctps_timoasconf);
1772 #ifdef SCTP_AUDITING_ENABLED
1773 		sctp_auditing(4, inp, stcb, net);
1774 #endif
1775 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1776 		break;
1777 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1778 		if ((stcb == NULL) || (inp == NULL)) {
1779 			break;
1780 		}
1781 		sctp_delete_prim_timer(inp, stcb, net);
1782 		SCTP_STAT_INCR(sctps_timodelprim);
1783 		break;
1784 
1785 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1786 		if ((stcb == NULL) || (inp == NULL)) {
1787 			break;
1788 		}
1789 		SCTP_STAT_INCR(sctps_timoautoclose);
1790 		sctp_autoclose_timer(inp, stcb, net);
1791 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1792 		did_output = 0;
1793 		break;
1794 	case SCTP_TIMER_TYPE_ASOCKILL:
1795 		if ((stcb == NULL) || (inp == NULL)) {
1796 			break;
1797 		}
1798 		SCTP_STAT_INCR(sctps_timoassockill);
1799 		/* Can we free it yet? */
1800 		SCTP_INP_DECR_REF(inp);
1801 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1802 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1803 		so = SCTP_INP_SO(inp);
1804 		atomic_add_int(&stcb->asoc.refcnt, 1);
1805 		SCTP_TCB_UNLOCK(stcb);
1806 		SCTP_SOCKET_LOCK(so, 1);
1807 		SCTP_TCB_LOCK(stcb);
1808 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1809 #endif
1810 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1811 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1812 		SCTP_SOCKET_UNLOCK(so, 1);
1813 #endif
1814 		/*
1815 		 * free asoc, always unlocks (or destroy's) so prevent
1816 		 * duplicate unlock or unlock of a free mtx :-0
1817 		 */
1818 		stcb = NULL;
1819 		goto out_no_decr;
1820 	case SCTP_TIMER_TYPE_INPKILL:
1821 		SCTP_STAT_INCR(sctps_timoinpkill);
1822 		if (inp == NULL) {
1823 			break;
1824 		}
1825 		/*
1826 		 * special case, take away our increment since WE are the
1827 		 * killer
1828 		 */
1829 		SCTP_INP_DECR_REF(inp);
1830 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1831 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1832 		    SCTP_CALLED_DIRECTLY_NOCMPSET);
1833 		goto out_no_decr;
1834 	default:
1835 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1836 		    tmr->type);
1837 		break;
1838 	};
1839 #ifdef SCTP_AUDITING_ENABLED
1840 	sctp_audit_log(0xF1, (uint8_t) tmr->type);
1841 	if (inp)
1842 		sctp_auditing(5, inp, stcb, net);
1843 #endif
1844 	if ((did_output) && stcb) {
1845 		/*
1846 		 * Now we need to clean up the control chunk chain if an
1847 		 * ECNE is on it. It must be marked as UNSENT again so next
1848 		 * call will continue to send it until such time that we get
1849 		 * a CWR, to remove it. It is, however, less likely that we
1850 		 * will find a ecn echo on the chain though.
1851 		 */
1852 		sctp_fix_ecn_echo(&stcb->asoc);
1853 	}
1854 get_out:
1855 	if (stcb) {
1856 		SCTP_TCB_UNLOCK(stcb);
1857 	}
1858 out_decr:
1859 	if (inp) {
1860 		SCTP_INP_DECR_REF(inp);
1861 	}
1862 out_no_decr:
1863 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
1864 	    tmr->type);
1865 	if (inp) {
1866 	}
1867 }
1868 
1869 void
1870 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1871     struct sctp_nets *net)
1872 {
1873 	int to_ticks;
1874 	struct sctp_timer *tmr;
1875 
1876 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1877 		return;
1878 
1879 	to_ticks = 0;
1880 
1881 	tmr = NULL;
1882 	if (stcb) {
1883 		SCTP_TCB_LOCK_ASSERT(stcb);
1884 	}
1885 	switch (t_type) {
1886 	case SCTP_TIMER_TYPE_ZERO_COPY:
1887 		tmr = &inp->sctp_ep.zero_copy_timer;
1888 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1889 		break;
1890 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1891 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1892 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1893 		break;
1894 	case SCTP_TIMER_TYPE_ADDR_WQ:
1895 		/* Only 1 tick away :-) */
1896 		tmr = &sctppcbinfo.addr_wq_timer;
1897 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1898 		break;
1899 	case SCTP_TIMER_TYPE_ITERATOR:
1900 		{
1901 			struct sctp_iterator *it;
1902 
1903 			it = (struct sctp_iterator *)inp;
1904 			tmr = &it->tmr;
1905 			to_ticks = SCTP_ITERATOR_TICKS;
1906 		}
1907 		break;
1908 	case SCTP_TIMER_TYPE_SEND:
1909 		/* Here we use the RTO timer */
1910 		{
1911 			int rto_val;
1912 
1913 			if ((stcb == NULL) || (net == NULL)) {
1914 				return;
1915 			}
1916 			tmr = &net->rxt_timer;
1917 			if (net->RTO == 0) {
1918 				rto_val = stcb->asoc.initial_rto;
1919 			} else {
1920 				rto_val = net->RTO;
1921 			}
1922 			to_ticks = MSEC_TO_TICKS(rto_val);
1923 		}
1924 		break;
1925 	case SCTP_TIMER_TYPE_INIT:
1926 		/*
1927 		 * Here we use the INIT timer default usually about 1
1928 		 * minute.
1929 		 */
1930 		if ((stcb == NULL) || (net == NULL)) {
1931 			return;
1932 		}
1933 		tmr = &net->rxt_timer;
1934 		if (net->RTO == 0) {
1935 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1936 		} else {
1937 			to_ticks = MSEC_TO_TICKS(net->RTO);
1938 		}
1939 		break;
1940 	case SCTP_TIMER_TYPE_RECV:
1941 		/*
1942 		 * Here we use the Delayed-Ack timer value from the inp
1943 		 * ususually about 200ms.
1944 		 */
1945 		if (stcb == NULL) {
1946 			return;
1947 		}
1948 		tmr = &stcb->asoc.dack_timer;
1949 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1950 		break;
1951 	case SCTP_TIMER_TYPE_SHUTDOWN:
1952 		/* Here we use the RTO of the destination. */
1953 		if ((stcb == NULL) || (net == NULL)) {
1954 			return;
1955 		}
1956 		if (net->RTO == 0) {
1957 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1958 		} else {
1959 			to_ticks = MSEC_TO_TICKS(net->RTO);
1960 		}
1961 		tmr = &net->rxt_timer;
1962 		break;
1963 	case SCTP_TIMER_TYPE_HEARTBEAT:
1964 		/*
1965 		 * the net is used here so that we can add in the RTO. Even
1966 		 * though we use a different timer. We also add the HB timer
1967 		 * PLUS a random jitter.
1968 		 */
1969 		if ((inp == NULL) || (stcb == NULL)) {
1970 			return;
1971 		} else {
1972 			uint32_t rndval;
1973 			uint8_t this_random;
1974 			int cnt_of_unconf = 0;
1975 			struct sctp_nets *lnet;
1976 
1977 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1978 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1979 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1980 					cnt_of_unconf++;
1981 				}
1982 			}
1983 			if (cnt_of_unconf) {
1984 				net = lnet = NULL;
1985 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
1986 			}
1987 			if (stcb->asoc.hb_random_idx > 3) {
1988 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1989 				memcpy(stcb->asoc.hb_random_values, &rndval,
1990 				    sizeof(stcb->asoc.hb_random_values));
1991 				stcb->asoc.hb_random_idx = 0;
1992 			}
1993 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
1994 			stcb->asoc.hb_random_idx++;
1995 			stcb->asoc.hb_ect_randombit = 0;
1996 			/*
1997 			 * this_random will be 0 - 256 ms RTO is in ms.
1998 			 */
1999 			if ((stcb->asoc.hb_is_disabled) &&
2000 			    (cnt_of_unconf == 0)) {
2001 				return;
2002 			}
2003 			if (net) {
2004 				int delay;
2005 
2006 				delay = stcb->asoc.heart_beat_delay;
2007 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2008 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2009 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2010 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2011 						delay = 0;
2012 					}
2013 				}
2014 				if (net->RTO == 0) {
2015 					/* Never been checked */
2016 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2017 				} else {
2018 					/* set rto_val to the ms */
2019 					to_ticks = delay + net->RTO + this_random;
2020 				}
2021 			} else {
2022 				if (cnt_of_unconf) {
2023 					to_ticks = this_random + stcb->asoc.initial_rto;
2024 				} else {
2025 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2026 				}
2027 			}
2028 			/*
2029 			 * Now we must convert the to_ticks that are now in
2030 			 * ms to ticks.
2031 			 */
2032 			to_ticks = MSEC_TO_TICKS(to_ticks);
2033 			tmr = &stcb->asoc.hb_timer;
2034 		}
2035 		break;
2036 	case SCTP_TIMER_TYPE_COOKIE:
2037 		/*
2038 		 * Here we can use the RTO timer from the network since one
2039 		 * RTT was compelete. If a retran happened then we will be
2040 		 * using the RTO initial value.
2041 		 */
2042 		if ((stcb == NULL) || (net == NULL)) {
2043 			return;
2044 		}
2045 		if (net->RTO == 0) {
2046 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2047 		} else {
2048 			to_ticks = MSEC_TO_TICKS(net->RTO);
2049 		}
2050 		tmr = &net->rxt_timer;
2051 		break;
2052 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2053 		/*
2054 		 * nothing needed but the endpoint here ususually about 60
2055 		 * minutes.
2056 		 */
2057 		if (inp == NULL) {
2058 			return;
2059 		}
2060 		tmr = &inp->sctp_ep.signature_change;
2061 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2062 		break;
2063 	case SCTP_TIMER_TYPE_ASOCKILL:
2064 		if (stcb == NULL) {
2065 			return;
2066 		}
2067 		tmr = &stcb->asoc.strreset_timer;
2068 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2069 		break;
2070 	case SCTP_TIMER_TYPE_INPKILL:
2071 		/*
2072 		 * The inp is setup to die. We re-use the signature_chage
2073 		 * timer since that has stopped and we are in the GONE
2074 		 * state.
2075 		 */
2076 		if (inp == NULL) {
2077 			return;
2078 		}
2079 		tmr = &inp->sctp_ep.signature_change;
2080 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2081 		break;
2082 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2083 		/*
2084 		 * Here we use the value found in the EP for PMTU ususually
2085 		 * about 10 minutes.
2086 		 */
2087 		if ((stcb == NULL) || (inp == NULL)) {
2088 			return;
2089 		}
2090 		if (net == NULL) {
2091 			return;
2092 		}
2093 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2094 		tmr = &net->pmtu_timer;
2095 		break;
2096 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2097 		/* Here we use the RTO of the destination */
2098 		if ((stcb == NULL) || (net == NULL)) {
2099 			return;
2100 		}
2101 		if (net->RTO == 0) {
2102 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2103 		} else {
2104 			to_ticks = MSEC_TO_TICKS(net->RTO);
2105 		}
2106 		tmr = &net->rxt_timer;
2107 		break;
2108 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2109 		/*
2110 		 * Here we use the endpoints shutdown guard timer usually
2111 		 * about 3 minutes.
2112 		 */
2113 		if ((inp == NULL) || (stcb == NULL)) {
2114 			return;
2115 		}
2116 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2117 		tmr = &stcb->asoc.shut_guard_timer;
2118 		break;
2119 	case SCTP_TIMER_TYPE_STRRESET:
2120 		/*
2121 		 * Here the timer comes from the stcb but its value is from
2122 		 * the net's RTO.
2123 		 */
2124 		if ((stcb == NULL) || (net == NULL)) {
2125 			return;
2126 		}
2127 		if (net->RTO == 0) {
2128 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2129 		} else {
2130 			to_ticks = MSEC_TO_TICKS(net->RTO);
2131 		}
2132 		tmr = &stcb->asoc.strreset_timer;
2133 		break;
2134 
2135 	case SCTP_TIMER_TYPE_EARLYFR:
2136 		{
2137 			unsigned int msec;
2138 
2139 			if ((stcb == NULL) || (net == NULL)) {
2140 				return;
2141 			}
2142 			if (net->flight_size > net->cwnd) {
2143 				/* no need to start */
2144 				return;
2145 			}
2146 			SCTP_STAT_INCR(sctps_earlyfrstart);
2147 			if (net->lastsa == 0) {
2148 				/* Hmm no rtt estimate yet? */
2149 				msec = stcb->asoc.initial_rto >> 2;
2150 			} else {
2151 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2152 			}
2153 			if (msec < sctp_early_fr_msec) {
2154 				msec = sctp_early_fr_msec;
2155 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2156 					msec = SCTP_MINFR_MSEC_FLOOR;
2157 				}
2158 			}
2159 			to_ticks = MSEC_TO_TICKS(msec);
2160 			tmr = &net->fr_timer;
2161 		}
2162 		break;
2163 	case SCTP_TIMER_TYPE_ASCONF:
2164 		/*
2165 		 * Here the timer comes from the stcb but its value is from
2166 		 * the net's RTO.
2167 		 */
2168 		if ((stcb == NULL) || (net == NULL)) {
2169 			return;
2170 		}
2171 		if (net->RTO == 0) {
2172 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2173 		} else {
2174 			to_ticks = MSEC_TO_TICKS(net->RTO);
2175 		}
2176 		tmr = &stcb->asoc.asconf_timer;
2177 		break;
2178 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2179 		if ((stcb == NULL) || (net != NULL)) {
2180 			return;
2181 		}
2182 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2183 		tmr = &stcb->asoc.delete_prim_timer;
2184 		break;
2185 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2186 		if (stcb == NULL) {
2187 			return;
2188 		}
2189 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2190 			/*
2191 			 * Really an error since stcb is NOT set to
2192 			 * autoclose
2193 			 */
2194 			return;
2195 		}
2196 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2197 		tmr = &stcb->asoc.autoclose_timer;
2198 		break;
2199 	default:
2200 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2201 		    __FUNCTION__, t_type);
2202 		return;
2203 		break;
2204 	};
2205 	if ((to_ticks <= 0) || (tmr == NULL)) {
2206 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2207 		    __FUNCTION__, t_type, to_ticks, tmr);
2208 		return;
2209 	}
2210 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2211 		/*
2212 		 * we do NOT allow you to have it already running. if it is
2213 		 * we leave the current one up unchanged
2214 		 */
2215 		return;
2216 	}
2217 	/* At this point we can proceed */
2218 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2219 		stcb->asoc.num_send_timers_up++;
2220 	}
2221 	tmr->stopped_from = 0;
2222 	tmr->type = t_type;
2223 	tmr->ep = (void *)inp;
2224 	tmr->tcb = (void *)stcb;
2225 	tmr->net = (void *)net;
2226 	tmr->self = (void *)tmr;
2227 	tmr->ticks = sctp_get_tick_count();
2228 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2229 	return;
2230 }
2231 
/*
 * Stop a running SCTP timer of type 't_type'.  The timer object itself
 * lives inside the inp/stcb/net that owns it, so which arguments must be
 * non-NULL depends on the timer type; callers that pass the wrong owner
 * simply get an early return.  'from' records, for debugging, where the
 * stop request originated (stored in tmr->stopped_from).
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Every type except the global ADDR_WQ timer hangs off an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the sctp_timer object that backs it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &sctppcbinfo.addr_wq_timer;
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			/* For the iterator, 'inp' actually carries the iterator. */
			struct sctp_iterator *it;

			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Keep the running SEND-timer count from going negative. */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/* Clear the self pointer and record who asked for the stop. */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2403 
2404 #ifdef SCTP_USE_ADLER32
2405 static uint32_t
2406 update_adler32(uint32_t adler, uint8_t * buf, int32_t len)
2407 {
2408 	uint32_t s1 = adler & 0xffff;
2409 	uint32_t s2 = (adler >> 16) & 0xffff;
2410 	int n;
2411 
2412 	for (n = 0; n < len; n++, buf++) {
2413 		/* s1 = (s1 + buf[n]) % BASE */
2414 		/* first we add */
2415 		s1 = (s1 + *buf);
2416 		/*
2417 		 * now if we need to, we do a mod by subtracting. It seems a
2418 		 * bit faster since I really will only ever do one subtract
2419 		 * at the MOST, since buf[n] is a max of 255.
2420 		 */
2421 		if (s1 >= SCTP_ADLER32_BASE) {
2422 			s1 -= SCTP_ADLER32_BASE;
2423 		}
2424 		/* s2 = (s2 + s1) % BASE */
2425 		/* first we add */
2426 		s2 = (s2 + s1);
2427 		/*
2428 		 * again, it is more efficent (it seems) to subtract since
2429 		 * the most s2 will ever be is (BASE-1 + BASE-1) in the
2430 		 * worse case. This would then be (2 * BASE) - 2, which will
2431 		 * still only do one subtract. On Intel this is much better
2432 		 * to do this way and avoid the divide. Have not -pg'd on
2433 		 * sparc.
2434 		 */
2435 		if (s2 >= SCTP_ADLER32_BASE) {
2436 			s2 -= SCTP_ADLER32_BASE;
2437 		}
2438 	}
2439 	/* Return the adler32 of the bytes buf[0..len-1] */
2440 	return ((s2 << 16) + s1);
2441 }
2442 
2443 #endif
2444 
2445 
2446 uint32_t
2447 sctp_calculate_len(struct mbuf *m)
2448 {
2449 	uint32_t tlen = 0;
2450 	struct mbuf *at;
2451 
2452 	at = m;
2453 	while (at) {
2454 		tlen += SCTP_BUF_LEN(at);
2455 		at = SCTP_BUF_NEXT(at);
2456 	}
2457 	return (tlen);
2458 }
2459 
2460 #if defined(SCTP_WITH_NO_CSUM)
2461 
2462 uint32_t
2463 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2464 {
2465 	/*
2466 	 * given a mbuf chain with a packetheader offset by 'offset'
2467 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2468 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2469 	 * has a side bonus as it will calculate the total length of the
2470 	 * mbuf chain. Note: if offset is greater than the total mbuf
2471 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2472 	 */
2473 	if (pktlen == NULL)
2474 		return (0);
2475 	*pktlen = sctp_calculate_len(m);
2476 	return (0);
2477 }
2478 
2479 #elif defined(SCTP_USE_INCHKSUM)
2480 
2481 #include <machine/in_cksum.h>
2482 
2483 uint32_t
2484 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2485 {
2486 	/*
2487 	 * given a mbuf chain with a packetheader offset by 'offset'
2488 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2489 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2490 	 * has a side bonus as it will calculate the total length of the
2491 	 * mbuf chain. Note: if offset is greater than the total mbuf
2492 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2493 	 */
2494 	int32_t tlen = 0;
2495 	struct mbuf *at;
2496 	uint32_t the_sum, retsum;
2497 
2498 	at = m;
2499 	while (at) {
2500 		tlen += SCTP_BUF_LEN(at);
2501 		at = SCTP_BUF_NEXT(at);
2502 	}
2503 	the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset));
2504 	if (pktlen != NULL)
2505 		*pktlen = (tlen - offset);
2506 	retsum = htons(the_sum);
2507 	return (the_sum);
2508 }
2509 
2510 #else
2511 
uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through the chain
	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
	 * has a side bonus as it will calculate the total length of the
	 * mbuf chain. Note: if offset is greater than the total mbuf
	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
	 */
	int32_t tlen = 0;

	/*
	 * Seed value: 1 for Adler-32, all-ones for CRC-32c, per the
	 * respective algorithms' initial states.
	 */
#ifdef SCTP_USE_ADLER32
	uint32_t base = 1L;

#else
	uint32_t base = 0xffffffff;

#endif
	struct mbuf *at;

	at = m;
	/* find the correct mbuf and offset into mbuf */
	while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) {
		offset -= SCTP_BUF_LEN(at);	/* update remaining offset
						 * left */
		at = SCTP_BUF_NEXT(at);
	}
	/* Fold each mbuf's payload (past 'offset' in the first one) in. */
	while (at != NULL) {
		if ((SCTP_BUF_LEN(at) - offset) > 0) {
#ifdef SCTP_USE_ADLER32
			base = update_adler32(base,
			    (unsigned char *)(SCTP_BUF_AT(at, offset)),
			    (unsigned int)(SCTP_BUF_LEN(at) - offset));
#else
			if ((SCTP_BUF_LEN(at) - offset) < 4) {
				/* Use old method if less than 4 bytes */
				base = old_update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			} else {
				base = update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			}
#endif
			tlen += SCTP_BUF_LEN(at) - offset;
			/* we only offset once into the first mbuf */
		}
		/*
		 * Consume any leftover 'offset' so subsequent mbufs are
		 * summed from their start.  NOTE(review): this branch only
		 * runs while offset is still non-zero, i.e. when the skip
		 * loop above stopped at offset == SCTP_BUF_LEN(at).
		 */
		if (offset) {
			if (offset < (uint32_t) SCTP_BUF_LEN(at))
				offset = 0;
			else
				offset -= SCTP_BUF_LEN(at);
		}
		at = SCTP_BUF_NEXT(at);
	}
	if (pktlen != NULL) {
		*pktlen = tlen;
	}
	/* Finalize: network byte order for Adler, bit-fixups for CRC-32c. */
#ifdef SCTP_USE_ADLER32
	/* Adler32 */
	base = htonl(base);
#else
	/* CRC-32c */
	base = sctp_csum_finalize(base);
#endif
	return (base);
}
2582 
2583 
2584 #endif
2585 
2586 void
2587 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2588     struct sctp_association *asoc, uint32_t mtu)
2589 {
2590 	/*
2591 	 * Reset the P-MTU size on this association, this involves changing
2592 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2593 	 * allow the DF flag to be cleared.
2594 	 */
2595 	struct sctp_tmit_chunk *chk;
2596 	unsigned int eff_mtu, ovh;
2597 
2598 #ifdef SCTP_PRINT_FOR_B_AND_M
2599 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2600 	    inp, asoc, mtu);
2601 #endif
2602 	asoc->smallest_mtu = mtu;
2603 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2604 		ovh = SCTP_MIN_OVERHEAD;
2605 	} else {
2606 		ovh = SCTP_MIN_V4_OVERHEAD;
2607 	}
2608 	eff_mtu = mtu - ovh;
2609 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2610 
2611 		if (chk->send_size > eff_mtu) {
2612 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2613 		}
2614 	}
2615 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2616 		if (chk->send_size > eff_mtu) {
2617 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2618 		}
2619 	}
2620 }
2621 
2622 
2623 /*
2624  * given an association and starting time of the current RTT period return
2625  * RTO in number of msecs net should point to the current network
2626  */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	/*
	 * On strict-alignment machines 'told' may be unaligned, so the
	 * caller tells us whether a local aligned copy is required.
	 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value */
	/* calc_time accumulates the elapsed time (now - *old) in msecs. */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	/*
	 * lastsa holds SRTT scaled by 2^SCTP_RTT_SHIFT, lastsv holds
	 * RTTVAR scaled by 2^SCTP_RTT_VAR_SHIFT, so the smoothing below
	 * works entirely in integer arithmetic.
	 */
	if (net->RTO_measured) {
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (sctp_logging_level & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		/* |RTT - SRTT| feeds the variance estimate below. */
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		/* Seed SRTT with the sample and RTTVAR with the same value. */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (sctp_logging_level & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR (both already in msec here). */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	/* Latch "satellite network" mode once the RTO crosses the bar. */
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2752 
2753 /*
2754  * return a pointer to a contiguous piece of data from the given mbuf chain
2755  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2756  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2757  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2758  */
2759 caddr_t
2760 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2761 {
2762 	uint32_t count;
2763 	uint8_t *ptr;
2764 
2765 	ptr = in_ptr;
2766 	if ((off < 0) || (len <= 0))
2767 		return (NULL);
2768 
2769 	/* find the desired start location */
2770 	while ((m != NULL) && (off > 0)) {
2771 		if (off < SCTP_BUF_LEN(m))
2772 			break;
2773 		off -= SCTP_BUF_LEN(m);
2774 		m = SCTP_BUF_NEXT(m);
2775 	}
2776 	if (m == NULL)
2777 		return (NULL);
2778 
2779 	/* is the current mbuf large enough (eg. contiguous)? */
2780 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2781 		return (mtod(m, caddr_t)+off);
2782 	} else {
2783 		/* else, it spans more than one mbuf, so save a temp copy... */
2784 		while ((m != NULL) && (len > 0)) {
2785 			count = min(SCTP_BUF_LEN(m) - off, len);
2786 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2787 			len -= count;
2788 			ptr += count;
2789 			off = 0;
2790 			m = SCTP_BUF_NEXT(m);
2791 		}
2792 		if ((m == NULL) && (len > 0))
2793 			return (NULL);
2794 		else
2795 			return ((caddr_t)in_ptr);
2796 	}
2797 }
2798 
2799 
2800 
2801 struct sctp_paramhdr *
2802 sctp_get_next_param(struct mbuf *m,
2803     int offset,
2804     struct sctp_paramhdr *pull,
2805     int pull_limit)
2806 {
2807 	/* This just provides a typed signature to Peter's Pull routine */
2808 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2809 	    (uint8_t *) pull));
2810 }
2811 
2812 
2813 int
2814 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2815 {
2816 	/*
2817 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2818 	 * padlen is > 3 this routine will fail.
2819 	 */
2820 	uint8_t *dp;
2821 	int i;
2822 
2823 	if (padlen > 3) {
2824 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2825 		return (ENOBUFS);
2826 	}
2827 	if (M_TRAILINGSPACE(m)) {
2828 		/*
2829 		 * The easy way. We hope the majority of the time we hit
2830 		 * here :)
2831 		 */
2832 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2833 		SCTP_BUF_LEN(m) += padlen;
2834 	} else {
2835 		/* Hard way we must grow the mbuf */
2836 		struct mbuf *tmp;
2837 
2838 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2839 		if (tmp == NULL) {
2840 			/* Out of space GAK! we are in big trouble. */
2841 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2842 			return (ENOSPC);
2843 		}
2844 		/* setup and insert in middle */
2845 		SCTP_BUF_NEXT(tmp) = SCTP_BUF_NEXT(m);
2846 		SCTP_BUF_LEN(tmp) = padlen;
2847 		SCTP_BUF_NEXT(m) = tmp;
2848 		dp = mtod(tmp, uint8_t *);
2849 	}
2850 	/* zero out the pad */
2851 	for (i = 0; i < padlen; i++) {
2852 		*dp = 0;
2853 		dp++;
2854 	}
2855 	return (0);
2856 }
2857 
2858 int
2859 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2860 {
2861 	/* find the last mbuf in chain and pad it */
2862 	struct mbuf *m_at;
2863 
2864 	m_at = m;
2865 	if (last_mbuf) {
2866 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2867 	} else {
2868 		while (m_at) {
2869 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2870 				return (sctp_add_pad_tombuf(m_at, padval));
2871 			}
2872 			m_at = SCTP_BUF_NEXT(m_at);
2873 		}
2874 	}
2875 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2876 	return (EFAULT);
2877 }
2878 
/*
 * Debug counter: incremented by sctp_notify_assoc_change() each time an
 * abortive association change wakes the socket's sleepers.
 */
int sctp_asoc_change_wake = 0;
2880 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification ('event', e.g. SCTP_COMM_LOST)
 * with error code 'error' to the association's socket receive queue, and
 * wake any sleepers for abortive events.  'so_locked' tells us whether
 * the caller already holds the socket lock (only meaningful on platforms
 * that need the socket-lock dance below).
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * First if we are are going down dump everything we can to the
	 * socket rcv queue.
	 */

	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
	    ) {
		/* If the socket is gone we are out of here */
		return;
	}
	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		/* Refused during setup, reset once established. */
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Acquire the socket lock without deadlocking: hold a
		 * refcount, drop the TCB lock, take the socket lock,
		 * then re-take the TCB lock.  Bail if the socket closed
		 * while the TCB lock was released.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	/* Build the sctp_assoc_change notification in a fresh mbuf. */
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* Wrap the mbuf in a read-queue entry and hand it to the socket. */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* Same lock/refcount dance as above for the wakeup. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3010 
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification (RFC 6458 style) on the
 * socket's receive queue.  'state' is the new peer-address state
 * (e.g. SCTP_ADDR_UNREACHABLE/AVAILABLE/CONFIRMED/ADDED/REMOVED) and
 * 'sa' is the affected address.  Silently does nothing if the event is
 * not enabled or memory is short.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	if (sa->sa_family == AF_INET) {
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
	} else {
		/*
		 * Non-IPv4 is treated as IPv6 here (spc_aaddr is a
		 * sockaddr_storage, so the copy cannot overflow).
		 */
		struct sockaddr_in6 *sin6;

		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
			/*
			 * Normalize link-local scope for the user: either
			 * recover a missing sin6_scope_id or strip the
			 * kernel-embedded scope from the address bytes.
			 */
			if (sin6->sin6_scope_id == 0) {
				/* recover scope_id for user */
				(void)sa6_recoverscope(sin6);
			} else {
				/* clear embedded scope_id for user */
				in6_clearscope(&sin6->sin6_addr);
			}
		}
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3073 
3074 
/*
 * Queue an SCTP_SEND_FAILED notification for a chunk ('chk') that was on
 * the send/sent queue.  The user's original data mbufs are stolen from
 * the chunk and chained behind the notification header so the user can
 * recover the message.  'error' distinguishes unsent vs. sent-but-failed.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
		/* event not enabled */
		return;

	/* length advertised to the user covers header plus the failed data */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* chain the user's data behind the notification header */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* frees the whole chain, including the stolen user data */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	/*
	 * NOTE(review): unlike the other notifiers, control->length and
	 * control->tail_mbuf are not set here — presumably
	 * sctp_add_to_readq() derives them from the mbuf chain; verify.
	 */
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3143 
3144 
/*
 * Queue an SCTP_SEND_FAILED notification for a stream-queue pending send
 * ('sp') that never became a chunk.  Mirrors sctp_notify_send_failed()
 * but pulls the sinfo fields from the pending-send record instead of a
 * transmit chunk.  The user's data mbufs are stolen and chained behind
 * the notification header.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
		/* event not enabled */
		return;

	/* length advertised to the user covers header plus the failed data */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	ssf->ssf_info.sinfo_flags = sp->sinfo_flags;
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* chain the user's data behind the notification header */
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* frees the whole chain, including the stolen user data */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	/*
	 * NOTE(review): control->length/tail_mbuf are not set here —
	 * presumably sctp_add_to_readq() derives them; verify.
	 */
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3213 
3214 
3215 
3216 static void
3217 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3218     uint32_t error)
3219 {
3220 	struct mbuf *m_notify;
3221 	struct sctp_adaptation_event *sai;
3222 	struct sctp_queued_to_read *control;
3223 
3224 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)))
3225 		/* event not enabled */
3226 		return;
3227 
3228 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3229 	if (m_notify == NULL)
3230 		/* no space left */
3231 		return;
3232 	SCTP_BUF_LEN(m_notify) = 0;
3233 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3234 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3235 	sai->sai_flags = 0;
3236 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3237 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3238 	sai->sai_assoc_id = sctp_get_associd(stcb);
3239 
3240 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3241 	SCTP_BUF_NEXT(m_notify) = NULL;
3242 
3243 	/* append to socket */
3244 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3245 	    0, 0, 0, 0, 0, 0,
3246 	    m_notify);
3247 	if (control == NULL) {
3248 		/* no memory */
3249 		sctp_m_freem(m_notify);
3250 		return;
3251 	}
3252 	control->length = SCTP_BUF_LEN(m_notify);
3253 	control->spec_flags = M_NOTIFICATION;
3254 	/* not that we need this */
3255 	control->tail_mbuf = m_notify;
3256 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3257 	    control,
3258 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3259 }
3260 
3261 /* This always must be called with the read-queue LOCKED in the INP */
3262 void
3263 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3264     int nolock, uint32_t val)
3265 {
3266 	struct mbuf *m_notify;
3267 	struct sctp_pdapi_event *pdapi;
3268 	struct sctp_queued_to_read *control;
3269 	struct sockbuf *sb;
3270 
3271 	if ((stcb == NULL) || (stcb->sctp_socket == NULL) ||
3272 	    sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
3273 		/* event not enabled */
3274 		return;
3275 
3276 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3277 	if (m_notify == NULL)
3278 		/* no space left */
3279 		return;
3280 	SCTP_BUF_LEN(m_notify) = 0;
3281 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3282 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3283 	pdapi->pdapi_flags = 0;
3284 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3285 	pdapi->pdapi_indication = error;
3286 	pdapi->pdapi_stream = (val >> 16);
3287 	pdapi->pdapi_seq = (val & 0x0000ffff);
3288 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3289 
3290 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3291 	SCTP_BUF_NEXT(m_notify) = NULL;
3292 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3293 	    0, 0, 0, 0, 0, 0,
3294 	    m_notify);
3295 	if (control == NULL) {
3296 		/* no memory */
3297 		sctp_m_freem(m_notify);
3298 		return;
3299 	}
3300 	control->spec_flags = M_NOTIFICATION;
3301 	control->length = SCTP_BUF_LEN(m_notify);
3302 	/* not that we need this */
3303 	control->tail_mbuf = m_notify;
3304 	control->held_length = 0;
3305 	control->length = 0;
3306 	if (nolock == 0) {
3307 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
3308 	}
3309 	sb = &stcb->sctp_socket->so_rcv;
3310 	if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
3311 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3312 	}
3313 	sctp_sballoc(stcb, sb, m_notify);
3314 	if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
3315 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3316 	}
3317 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3318 	control->end_added = 1;
3319 	if (stcb->asoc.control_pdapi)
3320 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3321 	else {
3322 		/* we really should not see this case */
3323 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3324 	}
3325 	if (nolock == 0) {
3326 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
3327 	}
3328 	if (stcb->sctp_ep && stcb->sctp_socket) {
3329 		/* This should always be the case */
3330 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3331 	}
3332 }
3333 
/*
 * Queue an SCTP_SHUTDOWN_EVENT notification when the peer has sent a
 * SHUTDOWN.  For TCP-model (and connected one-to-many) sockets the
 * socket is additionally marked "can't send more" so writers wake up.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock-order dance: drop the TCB lock to take the socket
		 * lock, then re-take the TCB lock.  The refcnt bump keeps
		 * the TCB alive across the unlocked window; bail if the
		 * socket closed in the meantime.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3404 
/*
 * Queue an SCTP_STREAM_RESET_EVENT notification listing the affected
 * streams.  'number_entries' of 0 means "all streams"; otherwise 'list'
 * holds 'number_entries' network-order stream numbers.  'flag' carries
 * the direction/result bits (SCTP_STRRESET_*).
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if (stcb == NULL) {
		return;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
		/* event not enabled */
		return;

	/* variable-length event: grab a cluster and size-check below */
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	if (number_entries == 0) {
		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
	} else {
		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
	}
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		/* stream numbers arrive in network order; report host order */
		for (i = 0; i < number_entries; i++) {
			strreset->strreset_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3472 
3473 
3474 void
3475 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3476     uint32_t error, void *data, int so_locked
3477 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3478     SCTP_UNUSED
3479 #endif
3480 )
3481 {
3482 	if (stcb == NULL) {
3483 		/* unlikely but */
3484 		return;
3485 	}
3486 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3487 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3488 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
3489 	    ) {
3490 		/* No notifications up when we are in a no socket state */
3491 		return;
3492 	}
3493 	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3494 		/* Can't send up to a closed socket any notifications */
3495 		return;
3496 	}
3497 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3498 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3499 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3500 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3501 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3502 			/* Don't report these in front states */
3503 			return;
3504 		}
3505 	}
3506 	switch (notification) {
3507 	case SCTP_NOTIFY_ASSOC_UP:
3508 		if (stcb->asoc.assoc_up_sent == 0) {
3509 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3510 			stcb->asoc.assoc_up_sent = 1;
3511 		}
3512 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3513 			sctp_notify_adaptation_layer(stcb, error);
3514 		}
3515 		break;
3516 	case SCTP_NOTIFY_ASSOC_DOWN:
3517 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3518 		break;
3519 	case SCTP_NOTIFY_INTERFACE_DOWN:
3520 		{
3521 			struct sctp_nets *net;
3522 
3523 			net = (struct sctp_nets *)data;
3524 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3525 			    (struct sockaddr *)&net->ro._l_addr, error);
3526 			break;
3527 		}
3528 	case SCTP_NOTIFY_INTERFACE_UP:
3529 		{
3530 			struct sctp_nets *net;
3531 
3532 			net = (struct sctp_nets *)data;
3533 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3534 			    (struct sockaddr *)&net->ro._l_addr, error);
3535 			break;
3536 		}
3537 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3538 		{
3539 			struct sctp_nets *net;
3540 
3541 			net = (struct sctp_nets *)data;
3542 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3543 			    (struct sockaddr *)&net->ro._l_addr, error);
3544 			break;
3545 		}
3546 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3547 		sctp_notify_send_failed2(stcb, error,
3548 		    (struct sctp_stream_queue_pending *)data, so_locked);
3549 		break;
3550 	case SCTP_NOTIFY_DG_FAIL:
3551 		sctp_notify_send_failed(stcb, error,
3552 		    (struct sctp_tmit_chunk *)data, so_locked);
3553 		break;
3554 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3555 		{
3556 			uint32_t val;
3557 
3558 			val = *((uint32_t *) data);
3559 
3560 			sctp_notify_partial_delivery_indication(stcb, error, 0, val);
3561 		}
3562 		break;
3563 	case SCTP_NOTIFY_STRDATA_ERR:
3564 		break;
3565 	case SCTP_NOTIFY_ASSOC_ABORTED:
3566 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3567 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3568 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3569 		} else {
3570 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3571 		}
3572 		break;
3573 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3574 		break;
3575 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3576 		break;
3577 	case SCTP_NOTIFY_ASSOC_RESTART:
3578 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3579 		break;
3580 	case SCTP_NOTIFY_HB_RESP:
3581 		break;
3582 	case SCTP_NOTIFY_STR_RESET_SEND:
3583 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3584 		break;
3585 	case SCTP_NOTIFY_STR_RESET_RECV:
3586 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3587 		break;
3588 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3589 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3590 		break;
3591 
3592 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3593 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3594 		break;
3595 
3596 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3597 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3598 		    error);
3599 		break;
3600 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3601 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3602 		    error);
3603 		break;
3604 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3605 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3606 		    error);
3607 		break;
3608 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3609 		break;
3610 	case SCTP_NOTIFY_ASCONF_FAILED:
3611 		break;
3612 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3613 		sctp_notify_shutdown_event(stcb);
3614 		break;
3615 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3616 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3617 		    (uint16_t) (uintptr_t) data);
3618 		break;
3619 #if 0
3620 	case SCTP_NOTIFY_AUTH_KEY_CONFLICT:
3621 		sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT,
3622 		    error, (uint16_t) (uintptr_t) data);
3623 		break;
3624 #endif				/* not yet? remove? */
3625 
3626 
3627 	default:
3628 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3629 		    __FUNCTION__, notification, notification);
3630 		break;
3631 	}			/* end switch */
3632 }
3633 
3634 void
3635 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3636 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3637     SCTP_UNUSED
3638 #endif
3639 )
3640 {
3641 	struct sctp_association *asoc;
3642 	struct sctp_stream_out *outs;
3643 	struct sctp_tmit_chunk *chk;
3644 	struct sctp_stream_queue_pending *sp;
3645 	int i;
3646 
3647 	asoc = &stcb->asoc;
3648 
3649 	if (stcb == NULL) {
3650 		return;
3651 	}
3652 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3653 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3654 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3655 		return;
3656 	}
3657 	/* now through all the gunk freeing chunks */
3658 	if (holds_lock == 0) {
3659 		SCTP_TCB_SEND_LOCK(stcb);
3660 	}
3661 	/* sent queue SHOULD be empty */
3662 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3663 		chk = TAILQ_FIRST(&asoc->sent_queue);
3664 		while (chk) {
3665 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3666 			asoc->sent_queue_cnt--;
3667 			if (chk->data) {
3668 				/*
3669 				 * trim off the sctp chunk header(it should
3670 				 * be there)
3671 				 */
3672 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3673 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3674 					sctp_mbuf_crush(chk->data);
3675 					chk->send_size -= sizeof(struct sctp_data_chunk);
3676 				}
3677 			}
3678 			sctp_free_bufspace(stcb, asoc, chk, 1);
3679 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3680 			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3681 			if (chk->data) {
3682 				sctp_m_freem(chk->data);
3683 				chk->data = NULL;
3684 			}
3685 			sctp_free_a_chunk(stcb, chk);
3686 			/* sa_ignore FREED_MEMORY */
3687 			chk = TAILQ_FIRST(&asoc->sent_queue);
3688 		}
3689 	}
3690 	/* pending send queue SHOULD be empty */
3691 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3692 		chk = TAILQ_FIRST(&asoc->send_queue);
3693 		while (chk) {
3694 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3695 			asoc->send_queue_cnt--;
3696 			if (chk->data) {
3697 				/*
3698 				 * trim off the sctp chunk header(it should
3699 				 * be there)
3700 				 */
3701 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3702 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3703 					sctp_mbuf_crush(chk->data);
3704 					chk->send_size -= sizeof(struct sctp_data_chunk);
3705 				}
3706 			}
3707 			sctp_free_bufspace(stcb, asoc, chk, 1);
3708 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3709 			if (chk->data) {
3710 				sctp_m_freem(chk->data);
3711 				chk->data = NULL;
3712 			}
3713 			sctp_free_a_chunk(stcb, chk);
3714 			/* sa_ignore FREED_MEMORY */
3715 			chk = TAILQ_FIRST(&asoc->send_queue);
3716 		}
3717 	}
3718 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3719 		/* For each stream */
3720 		outs = &stcb->asoc.strmout[i];
3721 		/* clean up any sends there */
3722 		stcb->asoc.locked_on_sending = NULL;
3723 		sp = TAILQ_FIRST(&outs->outqueue);
3724 		while (sp) {
3725 			stcb->asoc.stream_queue_cnt--;
3726 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3727 			sctp_free_spbufspace(stcb, asoc, sp);
3728 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3729 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3730 			if (sp->data) {
3731 				sctp_m_freem(sp->data);
3732 				sp->data = NULL;
3733 			}
3734 			if (sp->net)
3735 				sctp_free_remote_addr(sp->net);
3736 			sp->net = NULL;
3737 			/* Free the chunk */
3738 			sctp_free_a_strmoq(stcb, sp);
3739 			/* sa_ignore FREED_MEMORY */
3740 			sp = TAILQ_FIRST(&outs->outqueue);
3741 		}
3742 	}
3743 
3744 	if (holds_lock == 0) {
3745 		SCTP_TCB_SEND_UNLOCK(stcb);
3746 	}
3747 }
3748 
3749 void
3750 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3751 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3752     SCTP_UNUSED
3753 #endif
3754 )
3755 {
3756 
3757 	if (stcb == NULL) {
3758 		return;
3759 	}
3760 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3761 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3762 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3763 		return;
3764 	}
3765 	/* Tell them we lost the asoc */
3766 	sctp_report_all_outbound(stcb, 1, so_locked);
3767 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3768 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3769 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3770 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3771 	}
3772 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3773 }
3774 
/*
 * Abort an association in response to an inbound packet: notify the ULP
 * (if we have a TCB), send an ABORT back to the peer using the peer's
 * vtag, and free the TCB.  With no TCB, just send the ABORT and finish
 * tearing down a socket-gone inp if it has no associations left.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: the socket lock must be taken before
		 * the TCB lock; the refcnt bump keeps the TCB alive while
		 * the TCB lock is dropped.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		/* no TCB: finish freeing a half-dead endpoint if possible */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3820 
3821 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug-only dump of the per-association inbound and outbound TSN logs.
 * Each log is a circular buffer of SCTP_TSN_LOG_SIZE entries; when the
 * buffer has wrapped, the tail (from the current index to the end) is
 * printed before the head.
 *
 * NOTE(review): the guard macro below is spelled "NOSIY_PRINTS" —
 * presumably a typo for "NOISY_PRINTS".  Renaming it would change which
 * builds get the body, so it is left as-is; confirm against any build
 * glue that defines it.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* wrapped: print the older entries first */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		/* wrapped: print the older entries first */
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3882 
3883 #endif
3884 
/*
 * Locally abort an association: notify the ULP, send an ABORT chunk to
 * the peer (with optional operational error 'op_err'), update stats,
 * and free the TCB.  With no TCB, just finish tearing down a
 * socket-gone inp that has no associations left.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* established associations count against the gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: socket lock before TCB lock; the refcnt bump
	 * keeps the TCB alive while the TCB lock is dropped.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3950 
/*
 * Handle an "out of the blue" packet: one for which no association
 * exists.  Walk the chunk list; certain chunk types must never be
 * answered (to avoid ABORT wars or replying to our own teardown
 * traffic), a stray SHUTDOWN-ACK is answered with SHUTDOWN-COMPLETE,
 * and anything else causes an ABORT to be sent back to the source.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			/* socket is gone and no assocs remain: finish
			 * freeing the endpoint now */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
			return;
		default:
			break;
		}
		/* chunk lengths are padded to a 4-byte boundary on the wire */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/* fell off the end (or hit a corrupt chunk): send an ABORT */
	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id);
}
4002 
4003 /*
4004  * check the inbound datagram to make sure there is not an abort inside it,
4005  * if there is return 1, else return 0.
4006  */
4007 int
4008 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4009 {
4010 	struct sctp_chunkhdr *ch;
4011 	struct sctp_init_chunk *init_chk, chunk_buf;
4012 	int offset;
4013 	unsigned int chk_length;
4014 
4015 	offset = iphlen + sizeof(struct sctphdr);
4016 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4017 	    (uint8_t *) & chunk_buf);
4018 	while (ch != NULL) {
4019 		chk_length = ntohs(ch->chunk_length);
4020 		if (chk_length < sizeof(*ch)) {
4021 			/* packet is probably corrupt */
4022 			break;
4023 		}
4024 		/* we seem to be ok, is it an abort? */
4025 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4026 			/* yep, tell them */
4027 			return (1);
4028 		}
4029 		if (ch->chunk_type == SCTP_INITIATION) {
4030 			/* need to update the Vtag */
4031 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4032 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4033 			if (init_chk != NULL) {
4034 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4035 			}
4036 		}
4037 		/* Nope, move to the next chunk */
4038 		offset += SCTP_SIZE32(chk_length);
4039 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4040 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4041 	}
4042 	return (0);
4043 }
4044 
4045 /*
4046  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4047  * set (i.e. it's 0) so, create this function to compare link local scopes
4048  */
4049 uint32_t
4050 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4051 {
4052 	struct sockaddr_in6 a, b;
4053 
4054 	/* save copies */
4055 	a = *addr1;
4056 	b = *addr2;
4057 
4058 	if (a.sin6_scope_id == 0)
4059 		if (sa6_recoverscope(&a)) {
4060 			/* can't get scope, so can't match */
4061 			return (0);
4062 		}
4063 	if (b.sin6_scope_id == 0)
4064 		if (sa6_recoverscope(&b)) {
4065 			/* can't get scope, so can't match */
4066 			return (0);
4067 		}
4068 	if (a.sin6_scope_id != b.sin6_scope_id)
4069 		return (0);
4070 
4071 	return (1);
4072 }
4073 
4074 /*
4075  * returns a sockaddr_in6 with embedded scope recovered and removed
4076  */
4077 struct sockaddr_in6 *
4078 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4079 {
4080 	/* check and strip embedded scope junk */
4081 	if (addr->sin6_family == AF_INET6) {
4082 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4083 			if (addr->sin6_scope_id == 0) {
4084 				*store = *addr;
4085 				if (!sa6_recoverscope(store)) {
4086 					/* use the recovered scope */
4087 					addr = store;
4088 				}
4089 			} else {
4090 				/* else, return the original "to" addr */
4091 				in6_clearscope(&addr->sin6_addr);
4092 			}
4093 		}
4094 	}
4095 	return (addr);
4096 }
4097 
4098 /*
4099  * are the two addresses the same?  currently a "scopeless" check returns: 1
4100  * if same, 0 if not
4101  */
4102 int
4103 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4104 {
4105 
4106 	/* must be valid */
4107 	if (sa1 == NULL || sa2 == NULL)
4108 		return (0);
4109 
4110 	/* must be the same family */
4111 	if (sa1->sa_family != sa2->sa_family)
4112 		return (0);
4113 
4114 	if (sa1->sa_family == AF_INET6) {
4115 		/* IPv6 addresses */
4116 		struct sockaddr_in6 *sin6_1, *sin6_2;
4117 
4118 		sin6_1 = (struct sockaddr_in6 *)sa1;
4119 		sin6_2 = (struct sockaddr_in6 *)sa2;
4120 		return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
4121 		    &sin6_2->sin6_addr));
4122 	} else if (sa1->sa_family == AF_INET) {
4123 		/* IPv4 addresses */
4124 		struct sockaddr_in *sin_1, *sin_2;
4125 
4126 		sin_1 = (struct sockaddr_in *)sa1;
4127 		sin_2 = (struct sockaddr_in *)sa2;
4128 		return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4129 	} else {
4130 		/* we don't do these... */
4131 		return (0);
4132 	}
4133 }
4134 
4135 void
4136 sctp_print_address(struct sockaddr *sa)
4137 {
4138 	char ip6buf[INET6_ADDRSTRLEN];
4139 
4140 	ip6buf[0] = 0;
4141 	if (sa->sa_family == AF_INET6) {
4142 		struct sockaddr_in6 *sin6;
4143 
4144 		sin6 = (struct sockaddr_in6 *)sa;
4145 		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4146 		    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4147 		    ntohs(sin6->sin6_port),
4148 		    sin6->sin6_scope_id);
4149 	} else if (sa->sa_family == AF_INET) {
4150 		struct sockaddr_in *sin;
4151 		unsigned char *p;
4152 
4153 		sin = (struct sockaddr_in *)sa;
4154 		p = (unsigned char *)&sin->sin_addr;
4155 		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4156 		    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4157 	} else {
4158 		SCTP_PRINTF("?\n");
4159 	}
4160 }
4161 
4162 void
4163 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4164 {
4165 	if (iph->ip_v == IPVERSION) {
4166 		struct sockaddr_in lsa, fsa;
4167 
4168 		bzero(&lsa, sizeof(lsa));
4169 		lsa.sin_len = sizeof(lsa);
4170 		lsa.sin_family = AF_INET;
4171 		lsa.sin_addr = iph->ip_src;
4172 		lsa.sin_port = sh->src_port;
4173 		bzero(&fsa, sizeof(fsa));
4174 		fsa.sin_len = sizeof(fsa);
4175 		fsa.sin_family = AF_INET;
4176 		fsa.sin_addr = iph->ip_dst;
4177 		fsa.sin_port = sh->dest_port;
4178 		SCTP_PRINTF("src: ");
4179 		sctp_print_address((struct sockaddr *)&lsa);
4180 		SCTP_PRINTF("dest: ");
4181 		sctp_print_address((struct sockaddr *)&fsa);
4182 	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
4183 		struct ip6_hdr *ip6;
4184 		struct sockaddr_in6 lsa6, fsa6;
4185 
4186 		ip6 = (struct ip6_hdr *)iph;
4187 		bzero(&lsa6, sizeof(lsa6));
4188 		lsa6.sin6_len = sizeof(lsa6);
4189 		lsa6.sin6_family = AF_INET6;
4190 		lsa6.sin6_addr = ip6->ip6_src;
4191 		lsa6.sin6_port = sh->src_port;
4192 		bzero(&fsa6, sizeof(fsa6));
4193 		fsa6.sin6_len = sizeof(fsa6);
4194 		fsa6.sin6_family = AF_INET6;
4195 		fsa6.sin6_addr = ip6->ip6_dst;
4196 		fsa6.sin6_port = sh->dest_port;
4197 		SCTP_PRINTF("src: ");
4198 		sctp_print_address((struct sockaddr *)&lsa6);
4199 		SCTP_PRINTF("dest: ");
4200 		sctp_print_address((struct sockaddr *)&fsa6);
4201 	}
4202 }
4203 
/*
 * Move every read-queue entry belonging to stcb from old_inp's socket to
 * new_inp's (peeloff/accept support).  Done in two phases: first collect
 * the matching entries on a private list under the old inp's read lock
 * while debiting old_so's receive buffer, then append them to new_inp's
 * queue under its read lock while crediting new_so's receive buffer.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* lock out concurrent readers of the old socket buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for out target stcb */
	while (control) {
		/* save next before unlinking control from the list */
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit the old socket buffer for each mbuf */
			while (m) {
				if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit the new socket buffer for each mbuf */
		while (m) {
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4285 
4286 
4287 void
4288 sctp_add_to_readq(struct sctp_inpcb *inp,
4289     struct sctp_tcb *stcb,
4290     struct sctp_queued_to_read *control,
4291     struct sockbuf *sb,
4292     int end,
4293     int so_locked
4294 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4295     SCTP_UNUSED
4296 #endif
4297 )
4298 {
4299 	/*
4300 	 * Here we must place the control on the end of the socket read
4301 	 * queue AND increment sb_cc so that select will work properly on
4302 	 * read.
4303 	 */
4304 	struct mbuf *m, *prev = NULL;
4305 
4306 	if (inp == NULL) {
4307 		/* Gak, TSNH!! */
4308 #ifdef INVARIANTS
4309 		panic("Gak, inp NULL on add_to_readq");
4310 #endif
4311 		return;
4312 	}
4313 	SCTP_INP_READ_LOCK(inp);
4314 	if (!(control->spec_flags & M_NOTIFICATION)) {
4315 		atomic_add_int(&inp->total_recvs, 1);
4316 		if (!control->do_not_ref_stcb) {
4317 			atomic_add_int(&stcb->total_recvs, 1);
4318 		}
4319 	}
4320 	m = control->data;
4321 	control->held_length = 0;
4322 	control->length = 0;
4323 	while (m) {
4324 		if (SCTP_BUF_LEN(m) == 0) {
4325 			/* Skip mbufs with NO length */
4326 			if (prev == NULL) {
4327 				/* First one */
4328 				control->data = sctp_m_free(m);
4329 				m = control->data;
4330 			} else {
4331 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4332 				m = SCTP_BUF_NEXT(prev);
4333 			}
4334 			if (m == NULL) {
4335 				control->tail_mbuf = prev;;
4336 			}
4337 			continue;
4338 		}
4339 		prev = m;
4340 		if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
4341 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4342 		}
4343 		sctp_sballoc(stcb, sb, m);
4344 		if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
4345 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4346 		}
4347 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4348 		m = SCTP_BUF_NEXT(m);
4349 	}
4350 	if (prev != NULL) {
4351 		control->tail_mbuf = prev;
4352 	} else {
4353 		/* Everything got collapsed out?? */
4354 		return;
4355 	}
4356 	if (end) {
4357 		control->end_added = 1;
4358 	}
4359 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4360 	SCTP_INP_READ_UNLOCK(inp);
4361 	if (inp && inp->sctp_socket) {
4362 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4363 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4364 		} else {
4365 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4366 			struct socket *so;
4367 
4368 			so = SCTP_INP_SO(inp);
4369 			if (!so_locked) {
4370 				atomic_add_int(&stcb->asoc.refcnt, 1);
4371 				SCTP_TCB_UNLOCK(stcb);
4372 				SCTP_SOCKET_LOCK(so, 1);
4373 				SCTP_TCB_LOCK(stcb);
4374 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4375 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4376 					SCTP_SOCKET_UNLOCK(so, 1);
4377 					return;
4378 				}
4379 			}
4380 #endif
4381 			sctp_sorwakeup(inp, inp->sctp_socket);
4382 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4383 			if (!so_locked) {
4384 				SCTP_SOCKET_UNLOCK(so, 1);
4385 			}
4386 #endif
4387 		}
4388 	}
4389 }
4390 
4391 
/*
 * Append an mbuf chain to an existing read-queue entry (partial
 * delivery API, or reassembly-queue appends).  Zero-length mbufs are
 * stripped, the control's length and (if sb is non-NULL) the socket
 * buffer accounting are increased, and the reader is woken.  Returns 0
 * on success, -1 when the control is missing, already complete, or the
 * chain is empty.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common error exit: drop the read lock and bail */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			/* PD-API case: charge each mbuf to the socket buffer */
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/* lock ordering: socket lock before TCB lock */
			struct socket *so;

			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4533 
4534 
4535 
4536 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4537  *************ALTERNATE ROUTING CODE
4538  */
4539 
4540 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4541  *************ALTERNATE ROUTING CODE
4542  */
4543 
4544 struct mbuf *
4545 sctp_generate_invmanparam(int err)
4546 {
4547 	/* Return a MBUF with a invalid mandatory parameter */
4548 	struct mbuf *m;
4549 
4550 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4551 	if (m) {
4552 		struct sctp_paramhdr *ph;
4553 
4554 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4555 		ph = mtod(m, struct sctp_paramhdr *);
4556 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4557 		ph->param_type = htons(err);
4558 	}
4559 	return (m);
4560 }
4561 
4562 #ifdef SCTP_MBCNT_LOGGING
/*
 * Release the output-queue space accounting held by chunk tp1:
 * decrement the association's chunk count and total queued bytes, and
 * (for TCP-model sockets) the socket send-buffer byte count.  Counters
 * are clamped at zero rather than allowed to underflow.  Only compiled
 * when SCTP_MBCNT_LOGGING is defined (see surrounding #ifdef).
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		/* no data held, nothing was charged */
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (sctp_logging_level & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero so the counter never underflows */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* TCP-model sockets also track queued bytes in so_snd */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4594 
4595 #endif
4596 
/*
 * Release a PR-SCTP chunk (and, for fragmented messages, all of its
 * sibling fragments) from the given queue: mark each SCTP_FORWARD_TSN_SKIP,
 * notify the ULP of the failed datagram, free the data and update the
 * space accounting.  If the message straddles the sent and send queues
 * (no EOM found on the sent queue), recurses once into the send queue.
 * Returns the total book_size released.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    int reason, struct sctpchunk_listhead *queue, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	int ret_sz = 0;
	int notdone;
	uint8_t foundeom = 0;

	do {
		ret_sz += tp1->book_size;
		/* mark so a FORWARD-TSN will cover this chunk */
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if (tp1->data) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, SCTP_SO_NOT_LOCKED);
			sctp_m_freem(tp1->data);
			tp1->data = NULL;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/* socket lock needed for the wakeup; take it in
			 * order (socket before TCB) under a refcount */
			so = SCTP_INP_SO(stcb->sctp_ep);
			if (!so_locked) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					/*
					 * assoc was freed while we were
					 * unlocked
					 */
					SCTP_SOCKET_UNLOCK(so, 1);
					return (ret_sz);
				}
			}
#endif
			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
		if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
			stcb->asoc.sent_queue_cnt_removeable--;
		}
		if (queue == &stcb->asoc.send_queue) {
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.sent_queue_cnt++;
		}
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done   */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
		/*
		 * recurse through the send_queue too, starting at the
		 * beginning.
		 */
		if (tp1) {
			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
			    &stcb->asoc.send_queue, so_locked);
		} else {
			SCTP_PRINTF("hmm, nothing on the send queue and no EOM?\n");
		}
	}
	return (ret_sz);
}
4693 
4694 /*
4695  * checks to see if the given address, sa, is one that is currently known by
4696  * the kernel note: can't distinguish the same address on multiple interfaces
4697  * and doesn't handle multiple addresses with different zone/scope id's note:
4698  * ifa_ifwithaddr() compares the entire sockaddr struct
4699  */
4700 struct sctp_ifa *
4701 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4702     int holds_lock)
4703 {
4704 	struct sctp_laddr *laddr;
4705 
4706 	if (holds_lock == 0) {
4707 		SCTP_INP_RLOCK(inp);
4708 	}
4709 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4710 		if (laddr->ifa == NULL)
4711 			continue;
4712 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4713 			continue;
4714 		if (addr->sa_family == AF_INET) {
4715 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4716 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4717 				/* found him. */
4718 				if (holds_lock == 0) {
4719 					SCTP_INP_RUNLOCK(inp);
4720 				}
4721 				return (laddr->ifa);
4722 				break;
4723 			}
4724 		} else if (addr->sa_family == AF_INET6) {
4725 			if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr,
4726 			    &laddr->ifa->address.sin6.sin6_addr)) {
4727 				/* found him. */
4728 				if (holds_lock == 0) {
4729 					SCTP_INP_RUNLOCK(inp);
4730 				}
4731 				return (laddr->ifa);
4732 				break;
4733 			}
4734 		}
4735 	}
4736 	if (holds_lock == 0) {
4737 		SCTP_INP_RUNLOCK(inp);
4738 	}
4739 	return (NULL);
4740 }
4741 
4742 uint32_t
4743 sctp_get_ifa_hash_val(struct sockaddr *addr)
4744 {
4745 	if (addr->sa_family == AF_INET) {
4746 		struct sockaddr_in *sin;
4747 
4748 		sin = (struct sockaddr_in *)addr;
4749 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4750 	} else if (addr->sa_family == AF_INET6) {
4751 		struct sockaddr_in6 *sin6;
4752 		uint32_t hash_of_addr;
4753 
4754 		sin6 = (struct sockaddr_in6 *)addr;
4755 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4756 		    sin6->sin6_addr.s6_addr32[1] +
4757 		    sin6->sin6_addr.s6_addr32[2] +
4758 		    sin6->sin6_addr.s6_addr32[3]);
4759 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4760 		return (hash_of_addr);
4761 	}
4762 	return (0);
4763 }
4764 
4765 struct sctp_ifa *
4766 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4767 {
4768 	struct sctp_ifa *sctp_ifap;
4769 	struct sctp_vrf *vrf;
4770 	struct sctp_ifalist *hash_head;
4771 	uint32_t hash_of_addr;
4772 
4773 	if (holds_lock == 0)
4774 		SCTP_IPI_ADDR_RLOCK();
4775 
4776 	vrf = sctp_find_vrf(vrf_id);
4777 	if (vrf == NULL) {
4778 		if (holds_lock == 0)
4779 			SCTP_IPI_ADDR_RUNLOCK();
4780 		return (NULL);
4781 	}
4782 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4783 
4784 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4785 	if (hash_head == NULL) {
4786 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4787 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4788 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4789 		sctp_print_address(addr);
4790 		SCTP_PRINTF("No such bucket for address\n");
4791 		if (holds_lock == 0)
4792 			SCTP_IPI_ADDR_RUNLOCK();
4793 
4794 		return (NULL);
4795 	}
4796 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4797 		if (sctp_ifap == NULL) {
4798 			panic("Huh LIST_FOREACH corrupt");
4799 		}
4800 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4801 			continue;
4802 		if (addr->sa_family == AF_INET) {
4803 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4804 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4805 				/* found him. */
4806 				if (holds_lock == 0)
4807 					SCTP_IPI_ADDR_RUNLOCK();
4808 				return (sctp_ifap);
4809 				break;
4810 			}
4811 		} else if (addr->sa_family == AF_INET6) {
4812 			if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr,
4813 			    &sctp_ifap->address.sin6.sin6_addr)) {
4814 				/* found him. */
4815 				if (holds_lock == 0)
4816 					SCTP_IPI_ADDR_RUNLOCK();
4817 				return (sctp_ifap);
4818 				break;
4819 			}
4820 		}
4821 	}
4822 	if (holds_lock == 0)
4823 		SCTP_IPI_ADDR_RUNLOCK();
4824 	return (NULL);
4825 }
4826 
/*
 * Called after the user has pulled data off the socket: decide whether
 * enough receive window has opened (at least rwnd_req bytes beyond what
 * we last reported to the peer) to justify sending a window-update SACK
 * right away.  Holds a refcount on the assoc for the duration; when
 * hold_rlock is set, the inp read lock is dropped around the send and
 * re-taken before return.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* pin the assoc so it can't be freed underneath us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window opened by at least rwnd_req: send an update SACK */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb);
		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* re-take the read lock if we dropped it above */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
4907 
/*
 * sctp_sorecvmsg() - the SCTP-specific receive path behind soreceive().
 *
 * Copies one (possibly partial) message off the endpoint's read_queue,
 * either into 'uio' (when mp == NULL) or by handing back the raw mbuf
 * chain via '*mp'.  Optionally fills 'from' with the peer address and
 * 'sinfo' with sctp_sndrcvinfo data (when filling_sinfo is set).
 * Flags in '*msg_flags' are consumed (MSG_PEEK, MSG_DONTWAIT, ...) and
 * produced (MSG_EOR, MSG_NOTIFICATION, MSG_TRUNC).  Returns 0 or an
 * errno value.
 *
 * Locking: juggles three lock states tracked by local flags —
 * sockbuf_lock (sblock on so_rcv), hold_sblock (SOCKBUF_LOCK) and
 * hold_rlock (the inp read-queue lock) — plus an stcb refcount
 * (freecnt_applied) so the association cannot be freed under us.
 */
int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
	 * On the way out we may send out any combination of:
	 * MSG_NOTIFICATION MSG_EOR
	 *
	 */
	struct sctp_inpcb *inp = NULL;
	int my_len = 0;
	int cp_len = 0, error = 0;
	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
	struct mbuf *m = NULL, *embuf = NULL;
	struct sctp_tcb *stcb = NULL;
	int wakeup_read_socket = 0;
	int freecnt_applied = 0;
	int out_flags = 0, in_flags = 0;
	int block_allowed = 1;
	uint32_t freed_so_far = 0;
	uint32_t copied_so_far = 0;
	int in_eeor_mode = 0;
	int no_rcv_needed = 0;
	uint32_t rwnd_req = 0;
	int hold_sblock = 0;
	int hold_rlock = 0;
	int slen = 0;
	uint32_t held_length = 0;
	int sockbuf_lock = 0;

	/* Argument sanity: we always need a uio, and a 'from' needs a length. */
	if (uio == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if (from && fromlen <= 0) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if (msg_flags) {
		in_flags = *msg_flags;
		if (in_flags & MSG_PEEK)
			SCTP_STAT_INCR(sctps_read_peeks);
	} else {
		in_flags = 0;
	}
	slen = uio->uio_resid;

	/* Pull in and set up our int flags */
	if (in_flags & MSG_OOB) {
		/* Out of band's NOT supported */
		return (EOPNOTSUPP);
	}
	/* MSG_PEEK is only valid with the uio copy path (mp == NULL). */
	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((in_flags & (MSG_DONTWAIT
	    | MSG_NBIO
	    )) ||
	    SCTP_SO_IS_NBIO(so)) {
		block_allowed = 0;
	}
	/* setup the endpoint */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
		return (EFAULT);
	}
	/*
	 * rwnd_req is the amount we must free before telling the peer our
	 * receive window opened (via sctp_user_rcvd()).
	 */
	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
	/* Must be at least a MTU's worth */
	if (rwnd_req < SCTP_MIN_RWND)
		rwnd_req = SCTP_MIN_RWND;
	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
	if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTER,
		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
	}
	if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTERPL,
		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
	}
	/* Serialize readers on the receive sockbuf. */
	error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
	sockbuf_lock = 1;
	if (error) {
		goto release_unlocked;
	}
restart:
	/* Come back here whenever we must re-scan the read queue. */

restart_nosblocks:
	if (hold_sblock == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		hold_sblock = 1;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
		} else {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
			/* indicate EOF */
			error = 0;
		}
		goto out;
	}
	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
		/* we need to wait for data */
		if ((so->so_rcv.sb_cc == 0) &&
		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/*
				 * For active open side clear flags for
				 * re-use passive open is blocked by
				 * connect.
				 */
				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
					/*
					 * You were aborted, passive side
					 * always hits here
					 */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
					error = ECONNRESET;
					/*
					 * You get this once if you are
					 * active open side
					 */
					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
						/*
						 * Remove flag if on the
						 * active open side
						 */
						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
					}
				}
				so->so_state &= ~(SS_ISCONNECTING |
				    SS_ISDISCONNECTING |
				    SS_ISCONFIRMING |
				    SS_ISCONNECTED);
				if (error == 0) {
					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
						error = ENOTCONN;
					} else {
						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
					}
				}
				goto out;
			}
		}
		error = sbwait(&so->so_rcv);
		if (error) {
			goto out;
		}
		held_length = 0;
		goto restart_nosblocks;
	} else if (so->so_rcv.sb_cc == 0) {
		/* Non-blocking path: nothing buffered at all. */
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
		} else {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
					/*
					 * For active open side clear flags
					 * for re-use passive open is
					 * blocked by connect.
					 */
					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
						/*
						 * You were aborted, passive
						 * side always hits here
						 */
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
						error = ECONNRESET;
						/*
						 * You get this once if you
						 * are active open side
						 */
						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
							/*
							 * Remove flag if on
							 * the active open
							 * side
							 */
							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
						}
					}
					so->so_state &= ~(SS_ISCONNECTING |
					    SS_ISDISCONNECTING |
					    SS_ISCONFIRMING |
					    SS_ISCONNECTED);
					if (error == 0) {
						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
							error = ENOTCONN;
						} else {
							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
						}
					}
					goto out;
				}
			}
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
			error = EWOULDBLOCK;
		}
		goto out;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* we possibly have data we can read */
	/* sa_ignore FREED_MEMORY */
	control = TAILQ_FIRST(&inp->read_queue);
	if (control == NULL) {
		/*
		 * This could be happening since the appender did the
		 * increment but as not yet did the tailq insert onto the
		 * read_queue
		 */
		if (hold_rlock == 0) {
			SCTP_INP_READ_LOCK(inp);
			hold_rlock = 1;
		}
		control = TAILQ_FIRST(&inp->read_queue);
		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
#ifdef INVARIANTS
			panic("Huh, its non zero and nothing on control?");
#endif
			so->so_rcv.sb_cc = 0;
		}
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->do_not_ref_stcb)) {
		/*
		 * Clean up code for freeing assoc that left behind a
		 * pdapi.. maybe a peer in EEOR that just closed after
		 * sending and never indicated a EOR.
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		control->held_length = 0;
		if (control->data) {
			/* Hmm there is data here .. fix */
			struct mbuf *m_tmp;
			int cnt = 0;

			/* Recount the chain and mark it complete. */
			m_tmp = control->data;
			while (m_tmp) {
				cnt += SCTP_BUF_LEN(m_tmp);
				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
					control->tail_mbuf = m_tmp;
					control->end_added = 1;
				}
				m_tmp = SCTP_BUF_NEXT(m_tmp);
			}
			control->length = cnt;
		} else {
			/* remove it */
			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if (control->length == 0) {
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
		    (filling_sinfo)) {
			/* find a more suitable one then this */
			ctl = TAILQ_NEXT(control, next);
			while (ctl) {
				if ((ctl->stcb != control->stcb) && (ctl->length) &&
				    (ctl->some_taken ||
				    (ctl->spec_flags & M_NOTIFICATION) ||
				    ((ctl->do_not_ref_stcb == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present. If we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery as started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered..
					 */
					control = ctl;
					goto found_one;
				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
					    (ctl->length) &&
					    ((ctl->some_taken) ||
					    ((ctl->do_not_ref_stcb == 0) &&
					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to tht tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notificaiton on the same assoc to
					 * be delivered.
					 */
					control = ctl;
					goto found_one;
				}
				ctl = TAILQ_NEXT(ctl, next);
			}
		}
		/*
		 * if we reach here, not suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into the our held count, and its time to sleep again.
		 */
		held_length = so->so_rcv.sb_cc;
		control->held_length = so->so_rcv.sb_cc;
		goto restart;
	}
	/* Clear the held length since there is something to read */
	control->held_length = 0;
	if (hold_rlock) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
found_one:
	/*
	 * If we reach here, control has a some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
	control->some_taken = 1;
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	stcb = control->stcb;
	if (stcb) {
		if ((control->do_not_ref_stcb == 0) &&
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			if (freecnt_applied == 0)
				stcb = NULL;
		} else if (control->do_not_ref_stcb == 0) {
			/* you can't free it on me please */
			/*
			 * The lock on the socket buffer protects us so the
			 * free code will stop. But since we used the
			 * socketbuf lock and the sender uses the tcb_lock
			 * to increment, we need to use the atomic add to
			 * the refcnt
			 */
			if (freecnt_applied) {
#ifdef INVARIANTS
				panic("refcnt already incremented");
#else
				printf("refcnt already incremented?\n");
#endif
			} else {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				freecnt_applied = 1;
			}
			/*
			 * Setup to remember how much we have not yet told
			 * the peer our rwnd has opened up. Note we grab the
			 * value from the tcb from last time. Note too that
			 * sack sending clears this when a sack is sent,
			 * which is fine. Once we hit the rwnd_req, we then
			 * will go to the sctp_user_rcvd() that will not
			 * lock until it KNOWs it MUST send a WUP-SACK.
			 */
			freed_so_far = stcb->freed_by_sorcv_sincelast;
			stcb->freed_by_sorcv_sincelast = 0;
		}
	}
	if (stcb &&
	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
	    control->do_not_ref_stcb == 0) {
		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
	}
	/* First lets get off the sinfo and sockaddr info */
	if ((sinfo) && filling_sinfo) {
		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
		nxt = TAILQ_NEXT(control, next);
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			/* Caller wants extended info about the NEXT message too. */
			struct sctp_extrcvinfo *s_extra;

			s_extra = (struct sctp_extrcvinfo *)sinfo;
			if ((nxt) &&
			    (nxt->length)) {
				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
				if (nxt->sinfo_flags & SCTP_UNORDERED) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
				}
				if (nxt->spec_flags & M_NOTIFICATION) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
				}
				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
				s_extra->sreinfo_next_length = nxt->length;
				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
				if (nxt->tail_mbuf != NULL) {
					if (nxt->end_added) {
						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
					}
				}
			} else {
				/*
				 * we explicitly 0 this, since the memcpy
				 * got some other things beyond the older
				 * sinfo_ that is on the control's structure
				 * :-D
				 */
				nxt = NULL;
				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
				s_extra->sreinfo_next_aid = 0;
				s_extra->sreinfo_next_length = 0;
				s_extra->sreinfo_next_ppid = 0;
				s_extra->sreinfo_next_stream = 0;
			}
		}
		/*
		 * update off the real current cum-ack, if we have an stcb.
		 */
		if ((control->do_not_ref_stcb == 0) && stcb)
			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
		/*
		 * mask off the high bits, we keep the actual chunk bits in
		 * there.
		 */
		sinfo->sinfo_flags &= 0x00ff;
		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
			sinfo->sinfo_flags |= SCTP_UNORDERED;
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	{
		/* Debug logging of what was read, lock-free ring index via CAS. */
		int index, newindex;
		struct sctp_pcbtsn_rlog *entry;

		do {
			index = inp->readlog_index;
			newindex = index + 1;
			if (newindex >= SCTP_READ_LOG_SIZE) {
				newindex = 0;
			}
		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
		entry = &inp->readlog[index];
		entry->vtag = control->sinfo_assoc_id;
		entry->strm = control->sinfo_stream;
		entry->seq = control->sinfo_ssn;
		entry->sz = control->length;
		entry->flgs = control->sinfo_flags;
	}
#endif
	if (fromlen && from) {
		/* Copy out the peer address, truncated to the caller's buffer. */
		struct sockaddr *to;

#ifdef INET
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in *)from)->sin_port = control->port_from;
#else
		/* No AF_INET use AF_INET6 */
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
#endif

		to = from;
#if defined(INET) && defined(INET6)
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
		    (to->sa_family == AF_INET) &&
		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
			/* Convert the v4 address to a v4-mapped v6 address. */
			struct sockaddr_in *sin;
			struct sockaddr_in6 sin6;

			sin = (struct sockaddr_in *)to;
			bzero(&sin6, sizeof(sin6));
			sin6.sin6_family = AF_INET6;
			sin6.sin6_len = sizeof(struct sockaddr_in6);
			sin6.sin6_addr.s6_addr16[2] = 0xffff;
			bcopy(&sin->sin_addr,
			    &sin6.sin6_addr.s6_addr16[3],
			    sizeof(sin6.sin6_addr.s6_addr16[3]));
			sin6.sin6_port = sin->sin_port;
			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
		}
#endif
#if defined(INET6)
		{
			struct sockaddr_in6 lsa6, *to6;

			to6 = (struct sockaddr_in6 *)to;
			sctp_recover_scope_mac(to6, (&lsa6));
		}
#endif
	}
	/* now copy out what data we can */
	if (mp == NULL) {
		/* copy out each mbuf in the chain up to length */
get_more_data:
		m = control->data;
		while (m) {
			/* Move out all we can */
			cp_len = (int)uio->uio_resid;
			my_len = (int)SCTP_BUF_LEN(m);
			if (cp_len > my_len) {
				/* not enough in this buf */
				cp_len = my_len;
			}
			if (hold_rlock) {
				/* uiomove may sleep; never hold the read lock across it. */
				SCTP_INP_READ_UNLOCK(inp);
				hold_rlock = 0;
			}
			if (cp_len > 0)
				error = uiomove(mtod(m, char *), cp_len, uio);
			/* re-read */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				goto release;
			}
			if ((control->do_not_ref_stcb == 0) && stcb &&
			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				no_rcv_needed = 1;
			}
			if (error) {
				/* error we are out of here */
				goto release;
			}
			if ((SCTP_BUF_NEXT(m) == NULL) &&
			    (cp_len >= SCTP_BUF_LEN(m)) &&
			    ((control->end_added == 0) ||
			    (control->end_added &&
			    (TAILQ_NEXT(control, next) == NULL)))
			    ) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
			if (cp_len == SCTP_BUF_LEN(m)) {
				/* We consumed this whole mbuf. */
				if ((SCTP_BUF_NEXT(m) == NULL) &&
				    (control->end_added)) {
					out_flags |= MSG_EOR;
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				/* we ate up the mbuf */
				if (in_flags & MSG_PEEK) {
					/* just looking */
					m = SCTP_BUF_NEXT(m);
					copied_so_far += cp_len;
				} else {
					/* dispose of the mbuf */
					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
					}
					sctp_sbfree(control, stcb, &so->so_rcv, m);
					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
					}
					embuf = m;
					copied_so_far += cp_len;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					atomic_subtract_int(&control->length, cp_len);
					control->data = sctp_m_free(m);
					m = control->data;
					/*
					 * been through it all, must hold sb
					 * lock ok to null tail
					 */
					if (control->data == NULL) {
#ifdef INVARIANTS
						if ((control->end_added == 0) ||
						    (TAILQ_NEXT(control, next) == NULL)) {
							/*
							 * If the end is not
							 * added, OR the
							 * next is NOT null
							 * we MUST have the
							 * lock.
							 */
							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
								panic("Hmm we don't own the lock?");
							}
						}
#endif
						control->tail_mbuf = NULL;
#ifdef INVARIANTS
						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
							panic("end_added, nothing left and no MSG_EOR");
						}
#endif
					}
				}
			} else {
				/* Do we need to trim the mbuf? */
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				if ((in_flags & MSG_PEEK) == 0) {
					SCTP_BUF_RESV_UF(m, cp_len);
					SCTP_BUF_LEN(m) -= cp_len;
					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
					}
					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
					if ((control->do_not_ref_stcb == 0) &&
					    stcb) {
						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
					}
					copied_so_far += cp_len;
					embuf = m;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
						    SCTP_LOG_SBRESULT, 0);
					}
					atomic_subtract_int(&control->length, cp_len);
				} else {
					copied_so_far += cp_len;
				}
			}
			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
				break;
			}
			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
			    (control->do_not_ref_stcb == 0) &&
			    (freed_so_far >= rwnd_req)) {
				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
			}
		}		/* end while(m) */
		/*
		 * At this point we have looked at it all and we either have
		 * a MSG_EOR/or read all the user wants... <OR>
		 * control->length == 0.
		 */
		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
			/* we are done with this control */
			if (control->length == 0) {
				if (control->data) {
#ifdef INVARIANTS
					panic("control->data not null at read eor?");
#else
					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
					sctp_m_freem(control->data);
					control->data = NULL;
#endif
				}
		done_with_control:
				if (TAILQ_NEXT(control, next) == NULL) {
					/*
					 * If we don't have a next we need a
					 * lock, if there is a next
					 * interrupt is filling ahead of us
					 * and we don't need a lock to
					 * remove this guy (which is the
					 * head of the queue).
					 */
					if (hold_rlock == 0) {
						SCTP_INP_READ_LOCK(inp);
						hold_rlock = 1;
					}
				}
				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hidden data */
				if (control->held_length) {
					held_length = 0;
					control->held_length = 0;
					wakeup_read_socket = 1;
				}
				if (control->aux_data) {
					sctp_m_free(control->aux_data);
					control->aux_data = NULL;
				}
				no_rcv_needed = control->do_not_ref_stcb;
				sctp_free_remote_addr(control->whoFrom);
				control->data = NULL;
				sctp_free_a_readq(stcb, control);
				control = NULL;
				if ((freed_so_far >= rwnd_req) &&
				    (no_rcv_needed == 0))
					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);

			} else {
				/*
				 * The user did not read all of this
				 * message, turn off the returned MSG_EOR
				 * since we are leaving more behind on the
				 * control to read.
				 */
#ifdef INVARIANTS
				if (control->end_added &&
				    (control->data == NULL) &&
				    (control->tail_mbuf == NULL)) {
					panic("Gak, control->length is corrupt?");
				}
#endif
				no_rcv_needed = control->do_not_ref_stcb;
				out_flags &= ~MSG_EOR;
			}
		}
		if (out_flags & MSG_EOR) {
			goto release;
		}
		if ((uio->uio_resid == 0) ||
		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
		    ) {
			goto release;
		}
		/*
		 * If I hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions. Can we block? if not
		 * we are done. Did the user NOT set MSG_WAITALL?
		 */
		if (block_allowed == 0) {
			goto release;
		}
		/*
		 * We need to wait for more data a few things: - We don't
		 * sbunlock() so we don't get someone else reading. - We
		 * must be sure to account for the case where what is added
		 * is NOT to our control when we wakeup.
		 */

		/*
		 * Do we need to tell the transport a rwnd update might be
		 * needed before we go to sleep?
		 */
		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
		    ((freed_so_far >= rwnd_req) &&
		    (control->do_not_ref_stcb == 0) &&
		    (no_rcv_needed == 0))) {
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
		}
wait_some_more:
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			goto release;
		}
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
			goto release;

		if (hold_rlock == 1) {
			SCTP_INP_READ_UNLOCK(inp);
			hold_rlock = 0;
		}
		if (hold_sblock == 0) {
			SOCKBUF_LOCK(&so->so_rcv);
			hold_sblock = 1;
		}
		if ((copied_so_far) && (control->length == 0) &&
		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
		    ) {
			goto release;
		}
		if (so->so_rcv.sb_cc <= control->held_length) {
			error = sbwait(&so->so_rcv);
			if (error) {
				goto release;
			}
			control->held_length = 0;
		}
		if (hold_sblock) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			hold_sblock = 0;
		}
		if (control->length == 0) {
			/* still nothing here */
			if (control->end_added == 1) {
				/* he aborted, or is done i.e.did a shutdown */
				out_flags |= MSG_EOR;
				if (control->pdapi_aborted) {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;

					out_flags |= MSG_TRUNC;
				} else {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				goto done_with_control;
			}
			if (so->so_rcv.sb_cc > held_length) {
				control->held_length = so->so_rcv.sb_cc;
				held_length = 0;
			}
			goto wait_some_more;
		} else if (control->data == NULL) {
			/*
			 * we must re-sync since data is probably being
			 * added
			 */
			SCTP_INP_READ_LOCK(inp);
			if ((control->length > 0) && (control->data == NULL)) {
				/*
				 * big trouble.. we have the lock and its
				 * corrupt?
				 */
				panic("Impossible data==NULL length !=0");
			}
			SCTP_INP_READ_UNLOCK(inp);
			/* We will fall around to get more data */
		}
		goto get_more_data;
	} else {
		/*-
		 * Give caller back the mbuf chain,
		 * store in uio_resid the length
		 */
		wakeup_read_socket = 0;
		if ((control->end_added == 0) ||
		    (TAILQ_NEXT(control, next) == NULL)) {
			/* Need to get rlock */
			if (hold_rlock == 0) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
		}
		if (control->end_added) {
			out_flags |= MSG_EOR;
			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
		}
		if (control->spec_flags & M_NOTIFICATION) {
			out_flags |= MSG_NOTIFICATION;
		}
		uio->uio_resid = control->length;
		*mp = control->data;
		m = control->data;
		while (m) {
			/* Account each mbuf out of the socket buffer before handing off. */
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
			}
			sctp_sbfree(control, stcb, &so->so_rcv, m);
			freed_so_far += SCTP_BUF_LEN(m);
			freed_so_far += MSIZE;
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control->data = control->tail_mbuf = NULL;
		control->length = 0;
		if (out_flags & MSG_EOR) {
			/* Done with this control */
			goto done_with_control;
		}
	}
release:
	/* Drop any held locks, then release the sockbuf reader lock. */
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	sbunlock(&so->so_rcv);
	sockbuf_lock = 0;

release_unlocked:
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
		if ((freed_so_far >= rwnd_req) &&
		    (control && (control->do_not_ref_stcb == 0)) &&
		    (no_rcv_needed == 0))
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
	}
	if (msg_flags)
		*msg_flags |= out_flags;
out:
	/* Common exit: clear next-msg info, drop locks/refs, log, wake readers. */
	if (((out_flags & MSG_EOR) == 0) &&
	    ((in_flags & MSG_PEEK) == 0) &&
	    (sinfo) &&
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
		struct sctp_extrcvinfo *s_extra;

		s_extra = (struct sctp_extrcvinfo *)sinfo;
		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
	}
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if (sockbuf_lock) {
		sbunlock(&so->so_rcv);
	}
	if (freecnt_applied) {
		/*
		 * The lock on the socket buffer protects us so the free
		 * code will stop. But since we used the socketbuf lock and
		 * the sender uses the tcb_lock to increment, we need to use
		 * the atomic add to the refcnt.
		 */
		if (stcb == NULL) {
			panic("stcb for refcnt has gone NULL?");
		}
		atomic_add_int(&stcb->asoc.refcnt, -1);
		freecnt_applied = 0;
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
	}
	if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}
5866 
5867 
5868 #ifdef SCTP_MBUF_LOGGING
5869 struct mbuf *
5870 sctp_m_free(struct mbuf *m)
5871 {
5872 	if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
5873 		if (SCTP_BUF_IS_EXTENDED(m)) {
5874 			sctp_log_mb(m, SCTP_MBUF_IFREE);
5875 		}
5876 	}
5877 	return (m_free(m));
5878 }
5879 
5880 void
5881 sctp_m_freem(struct mbuf *mb)
5882 {
5883 	while (mb != NULL)
5884 		mb = sctp_m_free(mb);
5885 }
5886 
5887 #endif
5888 
5889 int
5890 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
5891 {
5892 	/*
5893 	 * Given a local address. For all associations that holds the
5894 	 * address, request a peer-set-primary.
5895 	 */
5896 	struct sctp_ifa *ifa;
5897 	struct sctp_laddr *wi;
5898 
5899 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
5900 	if (ifa == NULL) {
5901 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
5902 		return (EADDRNOTAVAIL);
5903 	}
5904 	/*
5905 	 * Now that we have the ifa we must awaken the iterator with this
5906 	 * message.
5907 	 */
5908 	wi = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr);
5909 	if (wi == NULL) {
5910 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
5911 		return (ENOMEM);
5912 	}
5913 	/* Now incr the count and int wi structure */
5914 	SCTP_INCR_LADDR_COUNT();
5915 	bzero(wi, sizeof(*wi));
5916 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
5917 	wi->ifa = ifa;
5918 	wi->action = SCTP_SET_PRIM_ADDR;
5919 	atomic_add_int(&ifa->refcount, 1);
5920 
5921 	/* Now add it to the work queue */
5922 	SCTP_IPI_ITERATOR_WQ_LOCK();
5923 	/*
5924 	 * Should this really be a tailq? As it is we will process the
5925 	 * newest first :-0
5926 	 */
5927 	LIST_INSERT_HEAD(&sctppcbinfo.addr_wq, wi, sctp_nxt_addr);
5928 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
5929 	    (struct sctp_inpcb *)NULL,
5930 	    (struct sctp_tcb *)NULL,
5931 	    (struct sctp_nets *)NULL);
5932 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
5933 	return (0);
5934 }
5935 
5936 
5937 
5938 
5939 int
5940 sctp_soreceive(struct socket *so,
5941     struct sockaddr **psa,
5942     struct uio *uio,
5943     struct mbuf **mp0,
5944     struct mbuf **controlp,
5945     int *flagsp)
5946 {
5947 	int error, fromlen;
5948 	uint8_t sockbuf[256];
5949 	struct sockaddr *from;
5950 	struct sctp_extrcvinfo sinfo;
5951 	int filling_sinfo = 1;
5952 	struct sctp_inpcb *inp;
5953 
5954 	inp = (struct sctp_inpcb *)so->so_pcb;
5955 	/* pickup the assoc we are reading from */
5956 	if (inp == NULL) {
5957 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5958 		return (EINVAL);
5959 	}
5960 	if ((sctp_is_feature_off(inp,
5961 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
5962 	    (controlp == NULL)) {
5963 		/* user does not want the sndrcv ctl */
5964 		filling_sinfo = 0;
5965 	}
5966 	if (psa) {
5967 		from = (struct sockaddr *)sockbuf;
5968 		fromlen = sizeof(sockbuf);
5969 		from->sa_len = 0;
5970 	} else {
5971 		from = NULL;
5972 		fromlen = 0;
5973 	}
5974 
5975 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
5976 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
5977 	if ((controlp) && (filling_sinfo)) {
5978 		/* copy back the sinfo in a CMSG format */
5979 		if (filling_sinfo)
5980 			*controlp = sctp_build_ctl_nchunk(inp,
5981 			    (struct sctp_sndrcvinfo *)&sinfo);
5982 		else
5983 			*controlp = NULL;
5984 	}
5985 	if (psa) {
5986 		/* copy back the address info */
5987 		if (from && from->sa_len) {
5988 			*psa = sodupsockaddr(from, M_NOWAIT);
5989 		} else {
5990 			*psa = NULL;
5991 		}
5992 	}
5993 	return (error);
5994 }
5995 
5996 
5997 int
5998 sctp_l_soreceive(struct socket *so,
5999     struct sockaddr **name,
6000     struct uio *uio,
6001     char **controlp,
6002     int *controllen,
6003     int *flag)
6004 {
6005 	int error, fromlen;
6006 	uint8_t sockbuf[256];
6007 	struct sockaddr *from;
6008 	struct sctp_extrcvinfo sinfo;
6009 	int filling_sinfo = 1;
6010 	struct sctp_inpcb *inp;
6011 
6012 	inp = (struct sctp_inpcb *)so->so_pcb;
6013 	/* pickup the assoc we are reading from */
6014 	if (inp == NULL) {
6015 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6016 		return (EINVAL);
6017 	}
6018 	if ((sctp_is_feature_off(inp,
6019 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6020 	    (controlp == NULL)) {
6021 		/* user does not want the sndrcv ctl */
6022 		filling_sinfo = 0;
6023 	}
6024 	if (name) {
6025 		from = (struct sockaddr *)sockbuf;
6026 		fromlen = sizeof(sockbuf);
6027 		from->sa_len = 0;
6028 	} else {
6029 		from = NULL;
6030 		fromlen = 0;
6031 	}
6032 
6033 	error = sctp_sorecvmsg(so, uio,
6034 	    (struct mbuf **)NULL,
6035 	    from, fromlen, flag,
6036 	    (struct sctp_sndrcvinfo *)&sinfo,
6037 	    filling_sinfo);
6038 	if ((controlp) && (filling_sinfo)) {
6039 		/*
6040 		 * copy back the sinfo in a CMSG format note that the caller
6041 		 * has reponsibility for freeing the memory.
6042 		 */
6043 		if (filling_sinfo)
6044 			*controlp = sctp_build_ctl_cchunk(inp,
6045 			    controllen,
6046 			    (struct sctp_sndrcvinfo *)&sinfo);
6047 	}
6048 	if (name) {
6049 		/* copy back the address info */
6050 		if (from && from->sa_len) {
6051 			*name = sodupsockaddr(from, M_WAIT);
6052 		} else {
6053 			*name = NULL;
6054 		}
6055 	}
6056 	return (error);
6057 }
6058 
6059 
6060 
6061 
6062 
6063 
6064 
6065 int
6066 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6067     int totaddr, int *error)
6068 {
6069 	int added = 0;
6070 	int i;
6071 	struct sctp_inpcb *inp;
6072 	struct sockaddr *sa;
6073 	size_t incr = 0;
6074 
6075 	sa = addr;
6076 	inp = stcb->sctp_ep;
6077 	*error = 0;
6078 	for (i = 0; i < totaddr; i++) {
6079 		if (sa->sa_family == AF_INET) {
6080 			incr = sizeof(struct sockaddr_in);
6081 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6082 				/* assoc gone no un-lock */
6083 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6084 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
6085 				*error = ENOBUFS;
6086 				goto out_now;
6087 			}
6088 			added++;
6089 		} else if (sa->sa_family == AF_INET6) {
6090 			incr = sizeof(struct sockaddr_in6);
6091 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6092 				/* assoc gone no un-lock */
6093 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6094 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
6095 				*error = ENOBUFS;
6096 				goto out_now;
6097 			}
6098 			added++;
6099 		}
6100 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6101 	}
6102 out_now:
6103 	return (added);
6104 }
6105 
6106 struct sctp_tcb *
6107 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6108     int *totaddr, int *num_v4, int *num_v6, int *error,
6109     int limit, int *bad_addr)
6110 {
6111 	struct sockaddr *sa;
6112 	struct sctp_tcb *stcb = NULL;
6113 	size_t incr, at, i;
6114 
6115 	at = incr = 0;
6116 	sa = addr;
6117 	*error = *num_v6 = *num_v4 = 0;
6118 	/* account and validate addresses */
6119 	for (i = 0; i < (size_t)*totaddr; i++) {
6120 		if (sa->sa_family == AF_INET) {
6121 			(*num_v4) += 1;
6122 			incr = sizeof(struct sockaddr_in);
6123 			if (sa->sa_len != incr) {
6124 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6125 				*error = EINVAL;
6126 				*bad_addr = 1;
6127 				return (NULL);
6128 			}
6129 		} else if (sa->sa_family == AF_INET6) {
6130 			struct sockaddr_in6 *sin6;
6131 
6132 			sin6 = (struct sockaddr_in6 *)sa;
6133 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6134 				/* Must be non-mapped for connectx */
6135 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6136 				*error = EINVAL;
6137 				*bad_addr = 1;
6138 				return (NULL);
6139 			}
6140 			(*num_v6) += 1;
6141 			incr = sizeof(struct sockaddr_in6);
6142 			if (sa->sa_len != incr) {
6143 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6144 				*error = EINVAL;
6145 				*bad_addr = 1;
6146 				return (NULL);
6147 			}
6148 		} else {
6149 			*totaddr = i;
6150 			/* we are done */
6151 			break;
6152 		}
6153 		SCTP_INP_INCR_REF(inp);
6154 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6155 		if (stcb != NULL) {
6156 			/* Already have or am bring up an association */
6157 			return (stcb);
6158 		} else {
6159 			SCTP_INP_DECR_REF(inp);
6160 		}
6161 		if ((at + incr) > (size_t)limit) {
6162 			*totaddr = i;
6163 			break;
6164 		}
6165 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6166 	}
6167 	return ((struct sctp_tcb *)NULL);
6168 }
6169 
6170 /*
6171  * sctp_bindx(ADD) for one address.
6172  * assumes all arguments are valid/checked by caller.
6173  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;
	struct sockaddr_in sin;	/* scratch storage when unwrapping a v4-mapped v6 addr */

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6)
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* validate the address length before using it */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* unwrap the v4-mapped address into a plain v4 sockaddr */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* endpoint not yet bound: this address becomes the primary bind */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* is some endpoint already bound to this addr/port pair? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 *
			 * NOTE(review): sctp_pcb_findep() returns 'lep' with
			 * a reference held; releasing 'inp' here is only
			 * clearly correct when lep == inp.  In the lep !=
			 * inp (EADDRINUSE) case this looks like it drops
			 * the wrong endpoint's reference -- confirm against
			 * sctp_pcb_findep()'s refcount contract.
			 */
			SCTP_INP_DECR_REF(inp);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* address is free: clear the port and add the binding */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6291 
6292 /*
6293  * sctp_bindx(DELETE) for one address.
6294  * assumes all arguments are valid/checked by caller.
6295  */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/* 'so' is unused here; kept for symmetry with the add path. */
	struct sockaddr *addr_touse;
	struct sockaddr_in sin;	/* scratch storage when unwrapping a v4-mapped v6 addr */

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6)
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* validate the address length before using it */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* unwrap the v4-mapped address into a plain v4 sockaddr */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6370 
6371 /*
6372  * returns the valid local address count for an assoc, taking into account
6373  * all scoping rules
6374  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Return the number of local addresses usable by this assoc,
	 * applying the association's scoping rules (loopback, v4
	 * private, v6 link-local and site-local scopes).
	 */
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/* Which address families may this endpoint use at all? */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			/* dual-stack v6 socket may also use v4 addresses */
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				/* loopback disallowed: skip the whole ifn */
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;

				if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
				    (ipv4_addr_legal)) {
					struct sockaddr_in *sin;

					sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
					if (sin->sin_addr.s_addr == 0) {
						/* skip unspecified addrs */
						continue;
					}
					if ((ipv4_local_scope == 0) &&
					    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
						/* private addrs out of scope */
						continue;
					}
					/* count this one */
					count++;
				} else if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
				    (ipv6_addr_legal)) {
					struct sockaddr_in6 *sin6;

					sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
					if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
						/* skip unspecified addrs */
						continue;
					}
					if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
						if (local_scope == 0)
							continue;
						if (sin6->sin6_scope_id == 0) {
							if (sa6_recoverscope(sin6) != 0)
								/* bad link local address */
								continue;
						}
					}
					if ((site_scope == 0) &&
					    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
						/* site-local addrs out of scope */
						continue;
					}
					/* count this one */
					count++;
				}
			}
		}
	} else {
		/*
		 * subset bound case: count the endpoint's own address
		 * list, minus any addresses restricted for this assoc
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6483 
6484 #if defined(SCTP_LOCAL_TRACE_BUF)
6485 
6486 void
6487 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6488 {
6489 	uint32_t saveindex, newindex;
6490 
6491 	do {
6492 		saveindex = sctp_log.index;
6493 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6494 			newindex = 1;
6495 		} else {
6496 			newindex = saveindex + 1;
6497 		}
6498 	} while (atomic_cmpset_int(&sctp_log.index, saveindex, newindex) == 0);
6499 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6500 		saveindex = 0;
6501 	}
6502 	sctp_log.entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6503 	sctp_log.entry[saveindex].subsys = subsys;
6504 	sctp_log.entry[saveindex].params[0] = a;
6505 	sctp_log.entry[saveindex].params[1] = b;
6506 	sctp_log.entry[saveindex].params[2] = c;
6507 	sctp_log.entry[saveindex].params[3] = d;
6508 	sctp_log.entry[saveindex].params[4] = e;
6509 	sctp_log.entry[saveindex].params[5] = f;
6510 }
6511 
6512 #endif
6513