xref: /freebsd/sys/netinet/sctputil.c (revision fa030de01267cc324a0b57479715e4a53356b665)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_crc32.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_cc_functions.h>
53 
54 #define NUMBER_OF_MTU_SIZES 18
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 void
62 sctp_sblog(struct sockbuf *sb,
63     struct sctp_tcb *stcb, int from, int incr)
64 {
65 	struct sctp_cwnd_log sctp_clog;
66 
67 	sctp_clog.x.sb.stcb = stcb;
68 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
69 	if (stcb)
70 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
71 	else
72 		sctp_clog.x.sb.stcb_sbcc = 0;
73 	sctp_clog.x.sb.incr = incr;
74 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
75 	    SCTP_LOG_EVENT_SB,
76 	    from,
77 	    sctp_clog.x.misc.log1,
78 	    sctp_clog.x.misc.log2,
79 	    sctp_clog.x.misc.log3,
80 	    sctp_clog.x.misc.log4);
81 }
82 
83 void
84 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
85 {
86 	struct sctp_cwnd_log sctp_clog;
87 
88 	sctp_clog.x.close.inp = (void *)inp;
89 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
90 	if (stcb) {
91 		sctp_clog.x.close.stcb = (void *)stcb;
92 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
93 	} else {
94 		sctp_clog.x.close.stcb = 0;
95 		sctp_clog.x.close.state = 0;
96 	}
97 	sctp_clog.x.close.loc = loc;
98 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
99 	    SCTP_LOG_EVENT_CLOSE,
100 	    0,
101 	    sctp_clog.x.misc.log1,
102 	    sctp_clog.x.misc.log2,
103 	    sctp_clog.x.misc.log3,
104 	    sctp_clog.x.misc.log4);
105 }
106 
107 
108 void
109 rto_logging(struct sctp_nets *net, int from)
110 {
111 	struct sctp_cwnd_log sctp_clog;
112 
113 	sctp_clog.x.rto.net = (void *)net;
114 	sctp_clog.x.rto.rtt = net->prev_rtt;
115 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
116 	    SCTP_LOG_EVENT_RTT,
117 	    from,
118 	    sctp_clog.x.misc.log1,
119 	    sctp_clog.x.misc.log2,
120 	    sctp_clog.x.misc.log3,
121 	    sctp_clog.x.misc.log4);
122 
123 }
124 
125 void
126 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
127 {
128 	struct sctp_cwnd_log sctp_clog;
129 
130 	sctp_clog.x.strlog.stcb = stcb;
131 	sctp_clog.x.strlog.n_tsn = tsn;
132 	sctp_clog.x.strlog.n_sseq = sseq;
133 	sctp_clog.x.strlog.e_tsn = 0;
134 	sctp_clog.x.strlog.e_sseq = 0;
135 	sctp_clog.x.strlog.strm = stream;
136 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
137 	    SCTP_LOG_EVENT_STRM,
138 	    from,
139 	    sctp_clog.x.misc.log1,
140 	    sctp_clog.x.misc.log2,
141 	    sctp_clog.x.misc.log3,
142 	    sctp_clog.x.misc.log4);
143 
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 
166 void
167 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
168 {
169 	struct sctp_cwnd_log sctp_clog;
170 
171 	sctp_clog.x.sack.cumack = cumack;
172 	sctp_clog.x.sack.oldcumack = old_cumack;
173 	sctp_clog.x.sack.tsn = tsn;
174 	sctp_clog.x.sack.numGaps = gaps;
175 	sctp_clog.x.sack.numDups = dups;
176 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
177 	    SCTP_LOG_EVENT_SACK,
178 	    from,
179 	    sctp_clog.x.misc.log1,
180 	    sctp_clog.x.misc.log2,
181 	    sctp_clog.x.misc.log3,
182 	    sctp_clog.x.misc.log4);
183 }
184 
185 void
186 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
187 {
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
204     int from)
205 {
206 	struct sctp_cwnd_log sctp_clog;
207 
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 
219 }
220 
221 
222 void
223 sctp_log_mb(struct mbuf *m, int from)
224 {
225 	struct sctp_cwnd_log sctp_clog;
226 
227 	sctp_clog.x.mb.mp = m;
228 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
229 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
230 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
231 	if (SCTP_BUF_IS_EXTENDED(m)) {
232 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
233 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
234 	} else {
235 		sctp_clog.x.mb.ext = 0;
236 		sctp_clog.x.mb.refcnt = 0;
237 	}
238 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
239 	    SCTP_LOG_EVENT_MBUF,
240 	    from,
241 	    sctp_clog.x.misc.log1,
242 	    sctp_clog.x.misc.log2,
243 	    sctp_clog.x.misc.log3,
244 	    sctp_clog.x.misc.log4);
245 }
246 
247 
248 void
249 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
250     int from)
251 {
252 	struct sctp_cwnd_log sctp_clog;
253 
254 	if (control == NULL) {
255 		SCTP_PRINTF("Gak log of NULL?\n");
256 		return;
257 	}
258 	sctp_clog.x.strlog.stcb = control->stcb;
259 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
260 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
261 	sctp_clog.x.strlog.strm = control->sinfo_stream;
262 	if (poschk != NULL) {
263 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
264 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
265 	} else {
266 		sctp_clog.x.strlog.e_tsn = 0;
267 		sctp_clog.x.strlog.e_sseq = 0;
268 	}
269 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
270 	    SCTP_LOG_EVENT_STRM,
271 	    from,
272 	    sctp_clog.x.misc.log1,
273 	    sctp_clog.x.misc.log2,
274 	    sctp_clog.x.misc.log3,
275 	    sctp_clog.x.misc.log4);
276 
277 }
278 
279 void
280 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
281 {
282 	struct sctp_cwnd_log sctp_clog;
283 
284 	sctp_clog.x.cwnd.net = net;
285 	if (stcb->asoc.send_queue_cnt > 255)
286 		sctp_clog.x.cwnd.cnt_in_send = 255;
287 	else
288 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
289 	if (stcb->asoc.stream_queue_cnt > 255)
290 		sctp_clog.x.cwnd.cnt_in_str = 255;
291 	else
292 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
293 
294 	if (net) {
295 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
296 		sctp_clog.x.cwnd.inflight = net->flight_size;
297 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
298 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
299 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
300 	}
301 	if (SCTP_CWNDLOG_PRESEND == from) {
302 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
303 	}
304 	sctp_clog.x.cwnd.cwnd_augment = augment;
305 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
306 	    SCTP_LOG_EVENT_CWND,
307 	    from,
308 	    sctp_clog.x.misc.log1,
309 	    sctp_clog.x.misc.log2,
310 	    sctp_clog.x.misc.log3,
311 	    sctp_clog.x.misc.log4);
312 
313 }
314 
315 void
316 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
317 {
318 	struct sctp_cwnd_log sctp_clog;
319 
320 	if (inp) {
321 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
322 
323 	} else {
324 		sctp_clog.x.lock.sock = (void *)NULL;
325 	}
326 	sctp_clog.x.lock.inp = (void *)inp;
327 	if (stcb) {
328 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
329 	} else {
330 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
331 	}
332 	if (inp) {
333 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
334 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
335 	} else {
336 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
337 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
338 	}
339 	sctp_clog.x.lock.info_lock = rw_wowned(&sctppcbinfo.ipi_ep_mtx);
340 	if (inp->sctp_socket) {
341 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
342 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
343 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
344 	} else {
345 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
346 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
347 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
348 	}
349 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
350 	    SCTP_LOG_LOCK_EVENT,
351 	    from,
352 	    sctp_clog.x.misc.log1,
353 	    sctp_clog.x.misc.log2,
354 	    sctp_clog.x.misc.log3,
355 	    sctp_clog.x.misc.log4);
356 
357 }
358 
359 void
360 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
361 {
362 	struct sctp_cwnd_log sctp_clog;
363 
364 	sctp_clog.x.cwnd.net = net;
365 	sctp_clog.x.cwnd.cwnd_new_value = error;
366 	sctp_clog.x.cwnd.inflight = net->flight_size;
367 	sctp_clog.x.cwnd.cwnd_augment = burst;
368 	if (stcb->asoc.send_queue_cnt > 255)
369 		sctp_clog.x.cwnd.cnt_in_send = 255;
370 	else
371 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
372 	if (stcb->asoc.stream_queue_cnt > 255)
373 		sctp_clog.x.cwnd.cnt_in_str = 255;
374 	else
375 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
376 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
377 	    SCTP_LOG_EVENT_MAXBURST,
378 	    from,
379 	    sctp_clog.x.misc.log1,
380 	    sctp_clog.x.misc.log2,
381 	    sctp_clog.x.misc.log3,
382 	    sctp_clog.x.misc.log4);
383 
384 }
385 
386 void
387 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
388 {
389 	struct sctp_cwnd_log sctp_clog;
390 
391 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
392 	sctp_clog.x.rwnd.send_size = snd_size;
393 	sctp_clog.x.rwnd.overhead = overhead;
394 	sctp_clog.x.rwnd.new_rwnd = 0;
395 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
396 	    SCTP_LOG_EVENT_RWND,
397 	    from,
398 	    sctp_clog.x.misc.log1,
399 	    sctp_clog.x.misc.log2,
400 	    sctp_clog.x.misc.log3,
401 	    sctp_clog.x.misc.log4);
402 }
403 
404 void
405 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
406 {
407 	struct sctp_cwnd_log sctp_clog;
408 
409 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
410 	sctp_clog.x.rwnd.send_size = flight_size;
411 	sctp_clog.x.rwnd.overhead = overhead;
412 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
413 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
414 	    SCTP_LOG_EVENT_RWND,
415 	    from,
416 	    sctp_clog.x.misc.log1,
417 	    sctp_clog.x.misc.log2,
418 	    sctp_clog.x.misc.log3,
419 	    sctp_clog.x.misc.log4);
420 }
421 
422 void
423 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
424 {
425 	struct sctp_cwnd_log sctp_clog;
426 
427 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
428 	sctp_clog.x.mbcnt.size_change = book;
429 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
430 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
431 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
432 	    SCTP_LOG_EVENT_MBCNT,
433 	    from,
434 	    sctp_clog.x.misc.log1,
435 	    sctp_clog.x.misc.log2,
436 	    sctp_clog.x.misc.log3,
437 	    sctp_clog.x.misc.log4);
438 
439 }
440 
/*
 * Log four caller-supplied 32-bit words as a generic misc trace event.
 * Unlike the other loggers here, the values are passed straight through
 * rather than being packed via a struct sctp_cwnd_log union.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
449 
450 void
451 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
452 {
453 	struct sctp_cwnd_log sctp_clog;
454 
455 	sctp_clog.x.wake.stcb = (void *)stcb;
456 	sctp_clog.x.wake.wake_cnt = wake_cnt;
457 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
458 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
459 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
460 
461 	if (stcb->asoc.stream_queue_cnt < 0xff)
462 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
463 	else
464 		sctp_clog.x.wake.stream_qcnt = 0xff;
465 
466 	if (stcb->asoc.chunks_on_out_queue < 0xff)
467 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
468 	else
469 		sctp_clog.x.wake.chunks_on_oque = 0xff;
470 
471 	sctp_clog.x.wake.sctpflags = 0;
472 	/* set in the defered mode stuff */
473 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
474 		sctp_clog.x.wake.sctpflags |= 1;
475 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
476 		sctp_clog.x.wake.sctpflags |= 2;
477 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
478 		sctp_clog.x.wake.sctpflags |= 4;
479 	/* what about the sb */
480 	if (stcb->sctp_socket) {
481 		struct socket *so = stcb->sctp_socket;
482 
483 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
484 	} else {
485 		sctp_clog.x.wake.sbflags = 0xff;
486 	}
487 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
488 	    SCTP_LOG_EVENT_WAKE,
489 	    from,
490 	    sctp_clog.x.misc.log1,
491 	    sctp_clog.x.misc.log2,
492 	    sctp_clog.x.misc.log3,
493 	    sctp_clog.x.misc.log4);
494 
495 }
496 
497 void
498 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
499 {
500 	struct sctp_cwnd_log sctp_clog;
501 
502 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
503 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
504 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
505 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
506 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
507 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
508 	sctp_clog.x.blk.sndlen = sendlen;
509 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
510 	    SCTP_LOG_EVENT_BLOCK,
511 	    from,
512 	    sctp_clog.x.misc.log1,
513 	    sctp_clog.x.misc.log2,
514 	    sctp_clog.x.misc.log3,
515 	    sctp_clog.x.misc.log4);
516 
517 }
518 
/*
 * Legacy stat-log copy-out hook.  Trace records now flow through the
 * kernel KTR facility instead, so there is nothing to fill in and this
 * always reports success without touching optval/optsize.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
525 
526 #ifdef SCTP_AUDITING_ENABLED
527 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
528 static int sctp_audit_indx = 0;
529 
530 static
531 void
532 sctp_print_audit_report(void)
533 {
534 	int i;
535 	int cnt;
536 
537 	cnt = 0;
538 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
539 		if ((sctp_audit_data[i][0] == 0xe0) &&
540 		    (sctp_audit_data[i][1] == 0x01)) {
541 			cnt = 0;
542 			SCTP_PRINTF("\n");
543 		} else if (sctp_audit_data[i][0] == 0xf0) {
544 			cnt = 0;
545 			SCTP_PRINTF("\n");
546 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
547 		    (sctp_audit_data[i][1] == 0x01)) {
548 			SCTP_PRINTF("\n");
549 			cnt = 0;
550 		}
551 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
552 		    (uint32_t) sctp_audit_data[i][1]);
553 		cnt++;
554 		if ((cnt % 14) == 0)
555 			SCTP_PRINTF("\n");
556 	}
557 	for (i = 0; i < sctp_audit_indx; i++) {
558 		if ((sctp_audit_data[i][0] == 0xe0) &&
559 		    (sctp_audit_data[i][1] == 0x01)) {
560 			cnt = 0;
561 			SCTP_PRINTF("\n");
562 		} else if (sctp_audit_data[i][0] == 0xf0) {
563 			cnt = 0;
564 			SCTP_PRINTF("\n");
565 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
566 		    (sctp_audit_data[i][1] == 0x01)) {
567 			SCTP_PRINTF("\n");
568 			cnt = 0;
569 		}
570 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
571 		    (uint32_t) sctp_audit_data[i][1]);
572 		cnt++;
573 		if ((cnt % 14) == 0)
574 			SCTP_PRINTF("\n");
575 	}
576 	SCTP_PRINTF("\n");
577 }
578 
579 void
580 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
581     struct sctp_nets *net)
582 {
583 	int resend_cnt, tot_out, rep, tot_book_cnt;
584 	struct sctp_nets *lnet;
585 	struct sctp_tmit_chunk *chk;
586 
587 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
588 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
589 	sctp_audit_indx++;
590 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
591 		sctp_audit_indx = 0;
592 	}
593 	if (inp == NULL) {
594 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
595 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
596 		sctp_audit_indx++;
597 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
598 			sctp_audit_indx = 0;
599 		}
600 		return;
601 	}
602 	if (stcb == NULL) {
603 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
604 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
605 		sctp_audit_indx++;
606 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
607 			sctp_audit_indx = 0;
608 		}
609 		return;
610 	}
611 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
612 	sctp_audit_data[sctp_audit_indx][1] =
613 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
614 	sctp_audit_indx++;
615 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
616 		sctp_audit_indx = 0;
617 	}
618 	rep = 0;
619 	tot_book_cnt = 0;
620 	resend_cnt = tot_out = 0;
621 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
622 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
623 			resend_cnt++;
624 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
625 			tot_out += chk->book_size;
626 			tot_book_cnt++;
627 		}
628 	}
629 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
630 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
631 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
632 		sctp_audit_indx++;
633 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
634 			sctp_audit_indx = 0;
635 		}
636 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
637 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
638 		rep = 1;
639 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
640 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
641 		sctp_audit_data[sctp_audit_indx][1] =
642 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
643 		sctp_audit_indx++;
644 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
645 			sctp_audit_indx = 0;
646 		}
647 	}
648 	if (tot_out != stcb->asoc.total_flight) {
649 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
650 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
651 		sctp_audit_indx++;
652 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
653 			sctp_audit_indx = 0;
654 		}
655 		rep = 1;
656 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
657 		    (int)stcb->asoc.total_flight);
658 		stcb->asoc.total_flight = tot_out;
659 	}
660 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
661 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
662 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
663 		sctp_audit_indx++;
664 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
665 			sctp_audit_indx = 0;
666 		}
667 		rep = 1;
668 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
669 
670 		stcb->asoc.total_flight_count = tot_book_cnt;
671 	}
672 	tot_out = 0;
673 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
674 		tot_out += lnet->flight_size;
675 	}
676 	if (tot_out != stcb->asoc.total_flight) {
677 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
678 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
679 		sctp_audit_indx++;
680 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
681 			sctp_audit_indx = 0;
682 		}
683 		rep = 1;
684 		SCTP_PRINTF("real flight:%d net total was %d\n",
685 		    stcb->asoc.total_flight, tot_out);
686 		/* now corrective action */
687 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
688 
689 			tot_out = 0;
690 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
691 				if ((chk->whoTo == lnet) &&
692 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
693 					tot_out += chk->book_size;
694 				}
695 			}
696 			if (lnet->flight_size != tot_out) {
697 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
698 				    (uint32_t) lnet, lnet->flight_size,
699 				    tot_out);
700 				lnet->flight_size = tot_out;
701 			}
702 		}
703 	}
704 	if (rep) {
705 		sctp_print_audit_report();
706 	}
707 }
708 
/*
 * Append one (event, detail) byte pair to the circular audit trail.
 * The write index wraps at SCTP_AUDIT_SIZE.  No locking is done here,
 * matching the rest of the audit machinery in this file.
 */
void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}
720 
721 #endif
722 
/*
 * a list of sizes based on typical mtu's, used only if next hop size not
 * returned.  Entries must stay sorted in ascending order, because
 * find_next_best_mtu() linearly scans for the first entry larger than a
 * failed datagram size.  NOTE(review): NUMBER_OF_MTU_SIZES (defined at
 * the top of this file) must equal the number of entries here — confirm
 * when editing either.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
747 
/*
 * Stop the per-association timers (heartbeat, delayed ack, stream
 * reset, ASCONF, autoclose, delayed events) and, for every destination,
 * the fast-retransmit and path-MTU timers, as part of shutting the
 * association down.  NOTE(review): the retransmission and shutdown
 * timers are not stopped here — presumably still needed while the
 * shutdown completes; confirm against callers.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
	}
}
767 
768 int
769 find_next_best_mtu(int totsz)
770 {
771 	int i, perfer;
772 
773 	/*
774 	 * if we are in here we must find the next best fit based on the
775 	 * size of the dg that failed to be sent.
776 	 */
777 	perfer = 0;
778 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
779 		if (totsz < sctp_mtu_sizes[i]) {
780 			perfer = i - 1;
781 			if (perfer < 0)
782 				perfer = 0;
783 			break;
784 		}
785 	}
786 	return (sctp_mtu_sizes[perfer]);
787 }
788 
/*
 * Refill the endpoint's random_store pool by hashing the endpoint's
 * random seed material together with a monotonically increasing
 * counter via sctp_hmac(SCTP_HMAC, ...), then reset store_at so
 * sctp_select_initial_TSN() starts consuming from the beginning.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
807 
/*
 * Return the next 32-bit random value from the endpoint's random_store
 * pool, refilling the pool when it wraps.  A 4-byte slot is claimed
 * lock-free with an atomic compare-and-set on store_at, retrying on
 * contention.  If initial_sequence_debug is non-zero, a predictable,
 * incrementing sequence is returned instead (debug aid).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before the last partial word of the signature-sized pool. */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* Claim [store_at, store_at+4) atomically; retry if we raced. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/*
	 * NOTE(review): reads the byte pool through a uint32_t pointer —
	 * assumes the offset is suitably aligned for this platform.
	 */
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
845 
846 uint32_t
847 sctp_select_a_tag(struct sctp_inpcb *inp)
848 {
849 	u_long x, not_done;
850 	struct timeval now;
851 
852 	(void)SCTP_GETTIME_TIMEVAL(&now);
853 	not_done = 1;
854 	while (not_done) {
855 		x = sctp_select_initial_TSN(&inp->sctp_ep);
856 		if (x == 0) {
857 			/* we never use 0 */
858 			continue;
859 		}
860 		if (sctp_is_vtag_good(inp, x, &now)) {
861 			not_done = 0;
862 		}
863 	}
864 	return (x);
865 }
866 
867 int
868 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
869     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
870 {
871 	struct sctp_association *asoc;
872 
873 	/*
874 	 * Anything set to zero is taken care of by the allocation routine's
875 	 * bzero
876 	 */
877 
878 	/*
879 	 * Up front select what scoping to apply on addresses I tell my peer
880 	 * Not sure what to do with these right now, we will need to come up
881 	 * with a way to set them. We may need to pass them through from the
882 	 * caller in the sctp_aloc_assoc() function.
883 	 */
884 	int i;
885 
886 	asoc = &stcb->asoc;
887 	/* init all variables to a known value. */
888 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
889 	asoc->max_burst = m->sctp_ep.max_burst;
890 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
891 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
892 	asoc->sctp_cmt_on_off = (uint8_t) sctp_cmt_on_off;
893 	/* JRS 5/21/07 - Init CMT PF variables */
894 	asoc->sctp_cmt_pf = (uint8_t) sctp_cmt_pf;
895 	asoc->sctp_frag_point = m->sctp_frag_point;
896 #ifdef INET
897 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
898 #else
899 	asoc->default_tos = 0;
900 #endif
901 
902 #ifdef INET6
903 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
904 #else
905 	asoc->default_flowlabel = 0;
906 #endif
907 	if (override_tag) {
908 		struct timeval now;
909 
910 		(void)SCTP_GETTIME_TIMEVAL(&now);
911 		if (sctp_is_vtag_good(m, override_tag, &now)) {
912 			asoc->my_vtag = override_tag;
913 		} else {
914 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
915 			return (ENOMEM);
916 		}
917 
918 	} else {
919 		asoc->my_vtag = sctp_select_a_tag(m);
920 	}
921 	/* Get the nonce tags */
922 	asoc->my_vtag_nonce = sctp_select_a_tag(m);
923 	asoc->peer_vtag_nonce = sctp_select_a_tag(m);
924 	asoc->vrf_id = vrf_id;
925 
926 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
927 		asoc->hb_is_disabled = 1;
928 	else
929 		asoc->hb_is_disabled = 0;
930 
931 #ifdef SCTP_ASOCLOG_OF_TSNS
932 	asoc->tsn_in_at = 0;
933 	asoc->tsn_out_at = 0;
934 	asoc->tsn_in_wrapped = 0;
935 	asoc->tsn_out_wrapped = 0;
936 	asoc->cumack_log_at = 0;
937 #endif
938 #ifdef SCTP_FS_SPEC_LOG
939 	asoc->fs_index = 0;
940 #endif
941 	asoc->refcnt = 0;
942 	asoc->assoc_up_sent = 0;
943 	asoc->assoc_id = asoc->my_vtag;
944 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
945 	    sctp_select_initial_TSN(&m->sctp_ep);
946 	/* we are optimisitic here */
947 	asoc->peer_supports_pktdrop = 1;
948 
949 	asoc->sent_queue_retran_cnt = 0;
950 
951 	/* for CMT */
952 	asoc->last_net_data_came_from = NULL;
953 
954 	/* This will need to be adjusted */
955 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
956 	asoc->last_acked_seq = asoc->init_seq_number - 1;
957 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
958 	asoc->asconf_seq_in = asoc->last_acked_seq;
959 
960 	/* here we are different, we hold the next one we expect */
961 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
962 
963 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
964 	asoc->initial_rto = m->sctp_ep.initial_rto;
965 
966 	asoc->max_init_times = m->sctp_ep.max_init_times;
967 	asoc->max_send_times = m->sctp_ep.max_send_times;
968 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
969 	asoc->free_chunk_cnt = 0;
970 
971 	asoc->iam_blocking = 0;
972 	/* ECN Nonce initialization */
973 	asoc->context = m->sctp_context;
974 	asoc->def_send = m->def_send;
975 	asoc->ecn_nonce_allowed = 0;
976 	asoc->receiver_nonce_sum = 1;
977 	asoc->nonce_sum_expect_base = 1;
978 	asoc->nonce_sum_check = 1;
979 	asoc->nonce_resync_tsn = 0;
980 	asoc->nonce_wait_for_ecne = 0;
981 	asoc->nonce_wait_tsn = 0;
982 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
983 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
984 	asoc->pr_sctp_cnt = 0;
985 	asoc->total_output_queue_size = 0;
986 
987 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
988 		struct in6pcb *inp6;
989 
990 		/* Its a V6 socket */
991 		inp6 = (struct in6pcb *)m;
992 		asoc->ipv6_addr_legal = 1;
993 		/* Now look at the binding flag to see if V4 will be legal */
994 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
995 			asoc->ipv4_addr_legal = 1;
996 		} else {
997 			/* V4 addresses are NOT legal on the association */
998 			asoc->ipv4_addr_legal = 0;
999 		}
1000 	} else {
1001 		/* Its a V4 socket, no - V6 */
1002 		asoc->ipv4_addr_legal = 1;
1003 		asoc->ipv6_addr_legal = 0;
1004 	}
1005 
1006 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1007 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1008 
1009 	asoc->smallest_mtu = m->sctp_frag_point;
1010 #ifdef SCTP_PRINT_FOR_B_AND_M
1011 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1012 	    asoc->smallest_mtu);
1013 #endif
1014 	asoc->minrto = m->sctp_ep.sctp_minrto;
1015 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1016 
1017 	asoc->locked_on_sending = NULL;
1018 	asoc->stream_locked_on = 0;
1019 	asoc->ecn_echo_cnt_onq = 0;
1020 	asoc->stream_locked = 0;
1021 
1022 	asoc->send_sack = 1;
1023 
1024 	LIST_INIT(&asoc->sctp_restricted_addrs);
1025 
1026 	TAILQ_INIT(&asoc->nets);
1027 	TAILQ_INIT(&asoc->pending_reply_queue);
1028 	TAILQ_INIT(&asoc->asconf_ack_sent);
1029 	/* Setup to fill the hb random cache at first HB */
1030 	asoc->hb_random_idx = 4;
1031 
1032 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1033 
1034 	/*
1035 	 * JRS - Pick the default congestion control module based on the
1036 	 * sysctl.
1037 	 */
1038 	switch (m->sctp_ep.sctp_default_cc_module) {
1039 		/* JRS - Standard TCP congestion control */
1040 	case SCTP_CC_RFC2581:
1041 		{
1042 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1043 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1044 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1045 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1046 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1047 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1048 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1049 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1050 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1051 			break;
1052 		}
1053 		/* JRS - High Speed TCP congestion control (Floyd) */
1054 	case SCTP_CC_HSTCP:
1055 		{
1056 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1057 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1058 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1059 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1060 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1061 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1063 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1064 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1065 			break;
1066 		}
1067 		/* JRS - HTCP congestion control */
1068 	case SCTP_CC_HTCP:
1069 		{
1070 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1071 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1072 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1073 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1074 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1075 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1077 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1078 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1079 			break;
1080 		}
1081 		/* JRS - By default, use RFC2581 */
1082 	default:
1083 		{
1084 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1085 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1086 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1087 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1088 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1089 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1091 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1092 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1093 			break;
1094 		}
1095 	}
1096 
1097 	/*
1098 	 * Now the stream parameters, here we allocate space for all streams
1099 	 * that we request by default.
1100 	 */
1101 	asoc->streamoutcnt = asoc->pre_open_streams =
1102 	    m->sctp_ep.pre_open_stream_count;
1103 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1104 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1105 	    SCTP_M_STRMO);
1106 	if (asoc->strmout == NULL) {
1107 		/* big trouble no memory */
1108 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1109 		return (ENOMEM);
1110 	}
1111 	for (i = 0; i < asoc->streamoutcnt; i++) {
1112 		/*
1113 		 * inbound side must be set to 0xffff, also NOTE when we get
1114 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1115 		 * count (streamoutcnt) but first check if we sent to any of
1116 		 * the upper streams that were dropped (if some were). Those
1117 		 * that were dropped must be notified to the upper layer as
1118 		 * failed to send.
1119 		 */
1120 		asoc->strmout[i].next_sequence_sent = 0x0;
1121 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1122 		asoc->strmout[i].stream_no = i;
1123 		asoc->strmout[i].last_msg_incomplete = 0;
1124 		asoc->strmout[i].next_spoke.tqe_next = 0;
1125 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1126 	}
1127 	/* Now the mapping array */
1128 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1129 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1130 	    SCTP_M_MAP);
1131 	if (asoc->mapping_array == NULL) {
1132 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1133 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1134 		return (ENOMEM);
1135 	}
1136 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1137 	/* Now the init of the other outqueues */
1138 	TAILQ_INIT(&asoc->free_chunks);
1139 	TAILQ_INIT(&asoc->out_wheel);
1140 	TAILQ_INIT(&asoc->control_send_queue);
1141 	TAILQ_INIT(&asoc->send_queue);
1142 	TAILQ_INIT(&asoc->sent_queue);
1143 	TAILQ_INIT(&asoc->reasmqueue);
1144 	TAILQ_INIT(&asoc->resetHead);
1145 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1146 	TAILQ_INIT(&asoc->asconf_queue);
1147 	/* authentication fields */
1148 	asoc->authinfo.random = NULL;
1149 	asoc->authinfo.assoc_key = NULL;
1150 	asoc->authinfo.assoc_keyid = 0;
1151 	asoc->authinfo.recv_key = NULL;
1152 	asoc->authinfo.recv_keyid = 0;
1153 	LIST_INIT(&asoc->shared_keys);
1154 	asoc->marked_retrans = 0;
1155 	asoc->timoinit = 0;
1156 	asoc->timodata = 0;
1157 	asoc->timosack = 0;
1158 	asoc->timoshutdown = 0;
1159 	asoc->timoheartbeat = 0;
1160 	asoc->timocookie = 0;
1161 	asoc->timoshutdownack = 0;
1162 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1163 	asoc->discontinuity_time = asoc->start_time;
1164 	/*
1165 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1166 	 * freed later whe the association is freed.
1167 	 */
1168 	return (0);
1169 }
1170 
1171 int
1172 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1173 {
1174 	/* mapping array needs to grow */
1175 	uint8_t *new_array;
1176 	uint32_t new_size;
1177 
1178 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1179 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1180 	if (new_array == NULL) {
1181 		/* can't get more, forget it */
1182 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1183 		    new_size);
1184 		return (-1);
1185 	}
1186 	memset(new_array, 0, new_size);
1187 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1188 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1189 	asoc->mapping_array = new_array;
1190 	asoc->mapping_array_size = new_size;
1191 	return (0);
1192 }
1193 
1194 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Perform one pass of the thread-based PCB iterator: walk endpoints
 * starting at it->inp, and for each endpoint whose flags/features match
 * the iterator's criteria, visit every association in the desired state.
 * Caller-supplied callbacks are invoked per endpoint (function_inp),
 * per association (function_assoc), at the end of each endpoint
 * (function_inp_end) and when the whole walk completes (function_atend).
 * The iterator structure itself is freed here when the walk finishes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	/* drop the reference taken when the iterator was scheduled */
	if (it->inp) {
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/* skip endpoints that do not satisfy the flag/feature criteria */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/* downgrade: hold only the read lock while walking associations */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Take a refcount on the assoc and a ref on the inp
			 * so neither can be freed while we briefly release
			 * and re-acquire the iterator and inp locks.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1321 
1322 void
1323 sctp_iterator_worker(void)
1324 {
1325 	struct sctp_iterator *it = NULL;
1326 
1327 	/* This function is called with the WQ lock in place */
1328 
1329 	sctppcbinfo.iterator_running = 1;
1330 again:
1331 	it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
1332 	while (it) {
1333 		/* now lets work on this one */
1334 		TAILQ_REMOVE(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
1335 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1336 		sctp_iterator_work(it);
1337 		SCTP_IPI_ITERATOR_WQ_LOCK();
1338 		/* sa_ignore FREED_MEMORY */
1339 		it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
1340 	}
1341 	if (TAILQ_FIRST(&sctppcbinfo.iteratorhead)) {
1342 		goto again;
1343 	}
1344 	sctppcbinfo.iterator_running = 0;
1345 	return;
1346 }
1347 
1348 #endif
1349 
1350 
1351 static void
1352 sctp_handle_addr_wq(void)
1353 {
1354 	/* deal with the ADDR wq from the rtsock calls */
1355 	struct sctp_laddr *wi;
1356 	struct sctp_asconf_iterator *asc;
1357 
1358 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1359 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1360 	if (asc == NULL) {
1361 		/* Try later, no memory */
1362 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1363 		    (struct sctp_inpcb *)NULL,
1364 		    (struct sctp_tcb *)NULL,
1365 		    (struct sctp_nets *)NULL);
1366 		return;
1367 	}
1368 	LIST_INIT(&asc->list_of_work);
1369 	asc->cnt = 0;
1370 	SCTP_IPI_ITERATOR_WQ_LOCK();
1371 	wi = LIST_FIRST(&sctppcbinfo.addr_wq);
1372 	while (wi != NULL) {
1373 		LIST_REMOVE(wi, sctp_nxt_addr);
1374 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1375 		asc->cnt++;
1376 		wi = LIST_FIRST(&sctppcbinfo.addr_wq);
1377 	}
1378 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1379 	if (asc->cnt == 0) {
1380 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1381 	} else {
1382 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1383 		    sctp_asconf_iterator_stcb,
1384 		    NULL,	/* No ep end for boundall */
1385 		    SCTP_PCB_FLAGS_BOUNDALL,
1386 		    SCTP_PCB_ANY_FEATURES,
1387 		    SCTP_ASOC_ANY_STATE,
1388 		    (void *)asc, 0,
1389 		    sctp_asconf_iterator_end, NULL, 0);
1390 	}
1391 }
1392 
/*
 * NOTE(review): file-scope scratch variables written by
 * sctp_timeout_handler() on the T3 path (last t3rxt return code and the
 * overall error count observed just before the retransmit).  They are
 * updated without any synchronization, so concurrent timer firings can
 * clobber each other's values — presumably these exist only as debugger
 * breadcrumbs; confirm nothing external reads them before narrowing
 * their linkage or scope.
 */
int retcode = 0;
int cur_oerr = 0;
1395 
1396 void
1397 sctp_timeout_handler(void *t)
1398 {
1399 	struct sctp_inpcb *inp;
1400 	struct sctp_tcb *stcb;
1401 	struct sctp_nets *net;
1402 	struct sctp_timer *tmr;
1403 
1404 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1405 	struct socket *so;
1406 
1407 #endif
1408 	int did_output;
1409 	struct sctp_iterator *it = NULL;
1410 
1411 	tmr = (struct sctp_timer *)t;
1412 	inp = (struct sctp_inpcb *)tmr->ep;
1413 	stcb = (struct sctp_tcb *)tmr->tcb;
1414 	net = (struct sctp_nets *)tmr->net;
1415 	did_output = 1;
1416 
1417 #ifdef SCTP_AUDITING_ENABLED
1418 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1419 	sctp_auditing(3, inp, stcb, net);
1420 #endif
1421 
1422 	/* sanity checks... */
1423 	if (tmr->self != (void *)tmr) {
1424 		/*
1425 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1426 		 * tmr);
1427 		 */
1428 		return;
1429 	}
1430 	tmr->stopped_from = 0xa001;
1431 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1432 		/*
1433 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1434 		 * tmr->type);
1435 		 */
1436 		return;
1437 	}
1438 	tmr->stopped_from = 0xa002;
1439 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1440 		return;
1441 	}
1442 	/* if this is an iterator timeout, get the struct and clear inp */
1443 	tmr->stopped_from = 0xa003;
1444 	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
1445 		it = (struct sctp_iterator *)inp;
1446 		inp = NULL;
1447 	}
1448 	if (inp) {
1449 		SCTP_INP_INCR_REF(inp);
1450 		if ((inp->sctp_socket == 0) &&
1451 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1452 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1453 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1454 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1455 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1456 		    ) {
1457 			SCTP_INP_DECR_REF(inp);
1458 			return;
1459 		}
1460 	}
1461 	tmr->stopped_from = 0xa004;
1462 	if (stcb) {
1463 		atomic_add_int(&stcb->asoc.refcnt, 1);
1464 		if (stcb->asoc.state == 0) {
1465 			atomic_add_int(&stcb->asoc.refcnt, -1);
1466 			if (inp) {
1467 				SCTP_INP_DECR_REF(inp);
1468 			}
1469 			return;
1470 		}
1471 	}
1472 	tmr->stopped_from = 0xa005;
1473 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
1474 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1475 		if (inp) {
1476 			SCTP_INP_DECR_REF(inp);
1477 		}
1478 		if (stcb) {
1479 			atomic_add_int(&stcb->asoc.refcnt, -1);
1480 		}
1481 		return;
1482 	}
1483 	tmr->stopped_from = 0xa006;
1484 
1485 	if (stcb) {
1486 		SCTP_TCB_LOCK(stcb);
1487 		atomic_add_int(&stcb->asoc.refcnt, -1);
1488 		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
1489 		    ((stcb->asoc.state == 0) ||
1490 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1491 			SCTP_TCB_UNLOCK(stcb);
1492 			if (inp) {
1493 				SCTP_INP_DECR_REF(inp);
1494 			}
1495 			return;
1496 		}
1497 	}
1498 	/* record in stopped what t-o occured */
1499 	tmr->stopped_from = tmr->type;
1500 
1501 	/* mark as being serviced now */
1502 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1503 		/*
1504 		 * Callout has been rescheduled.
1505 		 */
1506 		goto get_out;
1507 	}
1508 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1509 		/*
1510 		 * Not active, so no action.
1511 		 */
1512 		goto get_out;
1513 	}
1514 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1515 
1516 	/* call the handler for the appropriate timer type */
1517 	switch (tmr->type) {
1518 	case SCTP_TIMER_TYPE_ZERO_COPY:
1519 		if (inp == NULL) {
1520 			break;
1521 		}
1522 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1523 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1524 		}
1525 		break;
1526 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1527 		if (inp == NULL) {
1528 			break;
1529 		}
1530 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1531 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1532 		}
1533 		break;
1534 	case SCTP_TIMER_TYPE_ADDR_WQ:
1535 		sctp_handle_addr_wq();
1536 		break;
1537 	case SCTP_TIMER_TYPE_ITERATOR:
1538 		SCTP_STAT_INCR(sctps_timoiterator);
1539 		sctp_iterator_timer(it);
1540 		break;
1541 	case SCTP_TIMER_TYPE_SEND:
1542 		if ((stcb == NULL) || (inp == NULL)) {
1543 			break;
1544 		}
1545 		SCTP_STAT_INCR(sctps_timodata);
1546 		stcb->asoc.timodata++;
1547 		stcb->asoc.num_send_timers_up--;
1548 		if (stcb->asoc.num_send_timers_up < 0) {
1549 			stcb->asoc.num_send_timers_up = 0;
1550 		}
1551 		SCTP_TCB_LOCK_ASSERT(stcb);
1552 		cur_oerr = stcb->asoc.overall_error_count;
1553 		retcode = sctp_t3rxt_timer(inp, stcb, net);
1554 		if (retcode) {
1555 			/* no need to unlock on tcb its gone */
1556 
1557 			goto out_decr;
1558 		}
1559 		SCTP_TCB_LOCK_ASSERT(stcb);
1560 #ifdef SCTP_AUDITING_ENABLED
1561 		sctp_auditing(4, inp, stcb, net);
1562 #endif
1563 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1564 		if ((stcb->asoc.num_send_timers_up == 0) &&
1565 		    (stcb->asoc.sent_queue_cnt > 0)
1566 		    ) {
1567 			struct sctp_tmit_chunk *chk;
1568 
1569 			/*
1570 			 * safeguard. If there on some on the sent queue
1571 			 * somewhere but no timers running something is
1572 			 * wrong... so we start a timer on the first chunk
1573 			 * on the send queue on whatever net it is sent to.
1574 			 */
1575 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1576 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1577 			    chk->whoTo);
1578 		}
1579 		break;
1580 	case SCTP_TIMER_TYPE_INIT:
1581 		if ((stcb == NULL) || (inp == NULL)) {
1582 			break;
1583 		}
1584 		SCTP_STAT_INCR(sctps_timoinit);
1585 		stcb->asoc.timoinit++;
1586 		if (sctp_t1init_timer(inp, stcb, net)) {
1587 			/* no need to unlock on tcb its gone */
1588 			goto out_decr;
1589 		}
1590 		/* We do output but not here */
1591 		did_output = 0;
1592 		break;
1593 	case SCTP_TIMER_TYPE_RECV:
1594 		if ((stcb == NULL) || (inp == NULL)) {
1595 			break;
1596 		} {
1597 			int abort_flag;
1598 
1599 			SCTP_STAT_INCR(sctps_timosack);
1600 			stcb->asoc.timosack++;
1601 			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
1602 				sctp_sack_check(stcb, 0, 0, &abort_flag);
1603 			sctp_send_sack(stcb);
1604 		}
1605 #ifdef SCTP_AUDITING_ENABLED
1606 		sctp_auditing(4, inp, stcb, net);
1607 #endif
1608 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1609 		break;
1610 	case SCTP_TIMER_TYPE_SHUTDOWN:
1611 		if ((stcb == NULL) || (inp == NULL)) {
1612 			break;
1613 		}
1614 		if (sctp_shutdown_timer(inp, stcb, net)) {
1615 			/* no need to unlock on tcb its gone */
1616 			goto out_decr;
1617 		}
1618 		SCTP_STAT_INCR(sctps_timoshutdown);
1619 		stcb->asoc.timoshutdown++;
1620 #ifdef SCTP_AUDITING_ENABLED
1621 		sctp_auditing(4, inp, stcb, net);
1622 #endif
1623 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1624 		break;
1625 	case SCTP_TIMER_TYPE_HEARTBEAT:
1626 		{
1627 			struct sctp_nets *lnet;
1628 			int cnt_of_unconf = 0;
1629 
1630 			if ((stcb == NULL) || (inp == NULL)) {
1631 				break;
1632 			}
1633 			SCTP_STAT_INCR(sctps_timoheartbeat);
1634 			stcb->asoc.timoheartbeat++;
1635 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1636 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1637 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1638 					cnt_of_unconf++;
1639 				}
1640 			}
1641 			if (cnt_of_unconf == 0) {
1642 				if (sctp_heartbeat_timer(inp, stcb, lnet,
1643 				    cnt_of_unconf)) {
1644 					/* no need to unlock on tcb its gone */
1645 					goto out_decr;
1646 				}
1647 			}
1648 #ifdef SCTP_AUDITING_ENABLED
1649 			sctp_auditing(4, inp, stcb, lnet);
1650 #endif
1651 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
1652 			    stcb->sctp_ep, stcb, lnet);
1653 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1654 		}
1655 		break;
1656 	case SCTP_TIMER_TYPE_COOKIE:
1657 		if ((stcb == NULL) || (inp == NULL)) {
1658 			break;
1659 		}
1660 		if (sctp_cookie_timer(inp, stcb, net)) {
1661 			/* no need to unlock on tcb its gone */
1662 			goto out_decr;
1663 		}
1664 		SCTP_STAT_INCR(sctps_timocookie);
1665 		stcb->asoc.timocookie++;
1666 #ifdef SCTP_AUDITING_ENABLED
1667 		sctp_auditing(4, inp, stcb, net);
1668 #endif
1669 		/*
1670 		 * We consider T3 and Cookie timer pretty much the same with
1671 		 * respect to where from in chunk_output.
1672 		 */
1673 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1674 		break;
1675 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1676 		{
1677 			struct timeval tv;
1678 			int i, secret;
1679 
1680 			if (inp == NULL) {
1681 				break;
1682 			}
1683 			SCTP_STAT_INCR(sctps_timosecret);
1684 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1685 			SCTP_INP_WLOCK(inp);
1686 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1687 			inp->sctp_ep.last_secret_number =
1688 			    inp->sctp_ep.current_secret_number;
1689 			inp->sctp_ep.current_secret_number++;
1690 			if (inp->sctp_ep.current_secret_number >=
1691 			    SCTP_HOW_MANY_SECRETS) {
1692 				inp->sctp_ep.current_secret_number = 0;
1693 			}
1694 			secret = (int)inp->sctp_ep.current_secret_number;
1695 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1696 				inp->sctp_ep.secret_key[secret][i] =
1697 				    sctp_select_initial_TSN(&inp->sctp_ep);
1698 			}
1699 			SCTP_INP_WUNLOCK(inp);
1700 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1701 		}
1702 		did_output = 0;
1703 		break;
1704 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1705 		if ((stcb == NULL) || (inp == NULL)) {
1706 			break;
1707 		}
1708 		SCTP_STAT_INCR(sctps_timopathmtu);
1709 		sctp_pathmtu_timer(inp, stcb, net);
1710 		did_output = 0;
1711 		break;
1712 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1713 		if ((stcb == NULL) || (inp == NULL)) {
1714 			break;
1715 		}
1716 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1717 			/* no need to unlock on tcb its gone */
1718 			goto out_decr;
1719 		}
1720 		SCTP_STAT_INCR(sctps_timoshutdownack);
1721 		stcb->asoc.timoshutdownack++;
1722 #ifdef SCTP_AUDITING_ENABLED
1723 		sctp_auditing(4, inp, stcb, net);
1724 #endif
1725 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1726 		break;
1727 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1728 		if ((stcb == NULL) || (inp == NULL)) {
1729 			break;
1730 		}
1731 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1732 		sctp_abort_an_association(inp, stcb,
1733 		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
1734 		/* no need to unlock on tcb its gone */
1735 		goto out_decr;
1736 
1737 	case SCTP_TIMER_TYPE_STRRESET:
1738 		if ((stcb == NULL) || (inp == NULL)) {
1739 			break;
1740 		}
1741 		if (sctp_strreset_timer(inp, stcb, net)) {
1742 			/* no need to unlock on tcb its gone */
1743 			goto out_decr;
1744 		}
1745 		SCTP_STAT_INCR(sctps_timostrmrst);
1746 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1747 		break;
1748 	case SCTP_TIMER_TYPE_EARLYFR:
1749 		/* Need to do FR of things for net */
1750 		if ((stcb == NULL) || (inp == NULL)) {
1751 			break;
1752 		}
1753 		SCTP_STAT_INCR(sctps_timoearlyfr);
1754 		sctp_early_fr_timer(inp, stcb, net);
1755 		break;
1756 	case SCTP_TIMER_TYPE_ASCONF:
1757 		if ((stcb == NULL) || (inp == NULL)) {
1758 			break;
1759 		}
1760 		if (sctp_asconf_timer(inp, stcb, net)) {
1761 			/* no need to unlock on tcb its gone */
1762 			goto out_decr;
1763 		}
1764 		SCTP_STAT_INCR(sctps_timoasconf);
1765 #ifdef SCTP_AUDITING_ENABLED
1766 		sctp_auditing(4, inp, stcb, net);
1767 #endif
1768 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1769 		break;
1770 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1771 		if ((stcb == NULL) || (inp == NULL)) {
1772 			break;
1773 		}
1774 		sctp_delete_prim_timer(inp, stcb, net);
1775 		SCTP_STAT_INCR(sctps_timodelprim);
1776 		break;
1777 
1778 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1779 		if ((stcb == NULL) || (inp == NULL)) {
1780 			break;
1781 		}
1782 		SCTP_STAT_INCR(sctps_timoautoclose);
1783 		sctp_autoclose_timer(inp, stcb, net);
1784 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1785 		did_output = 0;
1786 		break;
1787 	case SCTP_TIMER_TYPE_ASOCKILL:
1788 		if ((stcb == NULL) || (inp == NULL)) {
1789 			break;
1790 		}
1791 		SCTP_STAT_INCR(sctps_timoassockill);
1792 		/* Can we free it yet? */
1793 		SCTP_INP_DECR_REF(inp);
1794 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1795 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1796 		so = SCTP_INP_SO(inp);
1797 		atomic_add_int(&stcb->asoc.refcnt, 1);
1798 		SCTP_TCB_UNLOCK(stcb);
1799 		SCTP_SOCKET_LOCK(so, 1);
1800 		SCTP_TCB_LOCK(stcb);
1801 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1802 #endif
1803 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1804 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1805 		SCTP_SOCKET_UNLOCK(so, 1);
1806 #endif
1807 		/*
1808 		 * free asoc, always unlocks (or destroy's) so prevent
1809 		 * duplicate unlock or unlock of a free mtx :-0
1810 		 */
1811 		stcb = NULL;
1812 		goto out_no_decr;
1813 	case SCTP_TIMER_TYPE_INPKILL:
1814 		SCTP_STAT_INCR(sctps_timoinpkill);
1815 		if (inp == NULL) {
1816 			break;
1817 		}
1818 		/*
1819 		 * special case, take away our increment since WE are the
1820 		 * killer
1821 		 */
1822 		SCTP_INP_DECR_REF(inp);
1823 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1824 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1825 		    SCTP_CALLED_DIRECTLY_NOCMPSET);
1826 		goto out_no_decr;
1827 	default:
1828 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1829 		    tmr->type);
1830 		break;
1831 	};
1832 #ifdef SCTP_AUDITING_ENABLED
1833 	sctp_audit_log(0xF1, (uint8_t) tmr->type);
1834 	if (inp)
1835 		sctp_auditing(5, inp, stcb, net);
1836 #endif
1837 	if ((did_output) && stcb) {
1838 		/*
1839 		 * Now we need to clean up the control chunk chain if an
1840 		 * ECNE is on it. It must be marked as UNSENT again so next
1841 		 * call will continue to send it until such time that we get
1842 		 * a CWR, to remove it. It is, however, less likely that we
1843 		 * will find a ecn echo on the chain though.
1844 		 */
1845 		sctp_fix_ecn_echo(&stcb->asoc);
1846 	}
1847 get_out:
1848 	if (stcb) {
1849 		SCTP_TCB_UNLOCK(stcb);
1850 	}
1851 out_decr:
1852 	if (inp) {
1853 		SCTP_INP_DECR_REF(inp);
1854 	}
1855 out_no_decr:
1856 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
1857 	    tmr->type);
1858 	if (inp) {
1859 	}
1860 }
1861 
1862 void
1863 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1864     struct sctp_nets *net)
1865 {
1866 	int to_ticks;
1867 	struct sctp_timer *tmr;
1868 
1869 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1870 		return;
1871 
1872 	to_ticks = 0;
1873 
1874 	tmr = NULL;
1875 	if (stcb) {
1876 		SCTP_TCB_LOCK_ASSERT(stcb);
1877 	}
1878 	switch (t_type) {
1879 	case SCTP_TIMER_TYPE_ZERO_COPY:
1880 		tmr = &inp->sctp_ep.zero_copy_timer;
1881 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1882 		break;
1883 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1884 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1885 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1886 		break;
1887 	case SCTP_TIMER_TYPE_ADDR_WQ:
1888 		/* Only 1 tick away :-) */
1889 		tmr = &sctppcbinfo.addr_wq_timer;
1890 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1891 		break;
1892 	case SCTP_TIMER_TYPE_ITERATOR:
1893 		{
1894 			struct sctp_iterator *it;
1895 
1896 			it = (struct sctp_iterator *)inp;
1897 			tmr = &it->tmr;
1898 			to_ticks = SCTP_ITERATOR_TICKS;
1899 		}
1900 		break;
1901 	case SCTP_TIMER_TYPE_SEND:
1902 		/* Here we use the RTO timer */
1903 		{
1904 			int rto_val;
1905 
1906 			if ((stcb == NULL) || (net == NULL)) {
1907 				return;
1908 			}
1909 			tmr = &net->rxt_timer;
1910 			if (net->RTO == 0) {
1911 				rto_val = stcb->asoc.initial_rto;
1912 			} else {
1913 				rto_val = net->RTO;
1914 			}
1915 			to_ticks = MSEC_TO_TICKS(rto_val);
1916 		}
1917 		break;
1918 	case SCTP_TIMER_TYPE_INIT:
1919 		/*
1920 		 * Here we use the INIT timer default usually about 1
1921 		 * minute.
1922 		 */
1923 		if ((stcb == NULL) || (net == NULL)) {
1924 			return;
1925 		}
1926 		tmr = &net->rxt_timer;
1927 		if (net->RTO == 0) {
1928 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1929 		} else {
1930 			to_ticks = MSEC_TO_TICKS(net->RTO);
1931 		}
1932 		break;
1933 	case SCTP_TIMER_TYPE_RECV:
1934 		/*
1935 		 * Here we use the Delayed-Ack timer value from the inp
1936 		 * ususually about 200ms.
1937 		 */
1938 		if (stcb == NULL) {
1939 			return;
1940 		}
1941 		tmr = &stcb->asoc.dack_timer;
1942 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1943 		break;
1944 	case SCTP_TIMER_TYPE_SHUTDOWN:
1945 		/* Here we use the RTO of the destination. */
1946 		if ((stcb == NULL) || (net == NULL)) {
1947 			return;
1948 		}
1949 		if (net->RTO == 0) {
1950 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1951 		} else {
1952 			to_ticks = MSEC_TO_TICKS(net->RTO);
1953 		}
1954 		tmr = &net->rxt_timer;
1955 		break;
1956 	case SCTP_TIMER_TYPE_HEARTBEAT:
1957 		/*
1958 		 * the net is used here so that we can add in the RTO. Even
1959 		 * though we use a different timer. We also add the HB timer
1960 		 * PLUS a random jitter.
1961 		 */
1962 		if ((inp == NULL) || (stcb == NULL)) {
1963 			return;
1964 		} else {
1965 			uint32_t rndval;
1966 			uint8_t this_random;
1967 			int cnt_of_unconf = 0;
1968 			struct sctp_nets *lnet;
1969 
1970 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1971 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1972 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1973 					cnt_of_unconf++;
1974 				}
1975 			}
1976 			if (cnt_of_unconf) {
1977 				net = lnet = NULL;
1978 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
1979 			}
1980 			if (stcb->asoc.hb_random_idx > 3) {
1981 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1982 				memcpy(stcb->asoc.hb_random_values, &rndval,
1983 				    sizeof(stcb->asoc.hb_random_values));
1984 				stcb->asoc.hb_random_idx = 0;
1985 			}
1986 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
1987 			stcb->asoc.hb_random_idx++;
1988 			stcb->asoc.hb_ect_randombit = 0;
1989 			/*
1990 			 * this_random will be 0 - 256 ms RTO is in ms.
1991 			 */
1992 			if ((stcb->asoc.hb_is_disabled) &&
1993 			    (cnt_of_unconf == 0)) {
1994 				return;
1995 			}
1996 			if (net) {
1997 				int delay;
1998 
1999 				delay = stcb->asoc.heart_beat_delay;
2000 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2001 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2002 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2003 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2004 						delay = 0;
2005 					}
2006 				}
2007 				if (net->RTO == 0) {
2008 					/* Never been checked */
2009 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2010 				} else {
2011 					/* set rto_val to the ms */
2012 					to_ticks = delay + net->RTO + this_random;
2013 				}
2014 			} else {
2015 				if (cnt_of_unconf) {
2016 					to_ticks = this_random + stcb->asoc.initial_rto;
2017 				} else {
2018 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2019 				}
2020 			}
2021 			/*
2022 			 * Now we must convert the to_ticks that are now in
2023 			 * ms to ticks.
2024 			 */
2025 			to_ticks = MSEC_TO_TICKS(to_ticks);
2026 			tmr = &stcb->asoc.hb_timer;
2027 		}
2028 		break;
2029 	case SCTP_TIMER_TYPE_COOKIE:
2030 		/*
2031 		 * Here we can use the RTO timer from the network since one
2032 		 * RTT was compelete. If a retran happened then we will be
2033 		 * using the RTO initial value.
2034 		 */
2035 		if ((stcb == NULL) || (net == NULL)) {
2036 			return;
2037 		}
2038 		if (net->RTO == 0) {
2039 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2040 		} else {
2041 			to_ticks = MSEC_TO_TICKS(net->RTO);
2042 		}
2043 		tmr = &net->rxt_timer;
2044 		break;
2045 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2046 		/*
2047 		 * nothing needed but the endpoint here ususually about 60
2048 		 * minutes.
2049 		 */
2050 		if (inp == NULL) {
2051 			return;
2052 		}
2053 		tmr = &inp->sctp_ep.signature_change;
2054 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2055 		break;
2056 	case SCTP_TIMER_TYPE_ASOCKILL:
2057 		if (stcb == NULL) {
2058 			return;
2059 		}
2060 		tmr = &stcb->asoc.strreset_timer;
2061 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2062 		break;
2063 	case SCTP_TIMER_TYPE_INPKILL:
2064 		/*
2065 		 * The inp is setup to die. We re-use the signature_chage
2066 		 * timer since that has stopped and we are in the GONE
2067 		 * state.
2068 		 */
2069 		if (inp == NULL) {
2070 			return;
2071 		}
2072 		tmr = &inp->sctp_ep.signature_change;
2073 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2074 		break;
2075 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2076 		/*
2077 		 * Here we use the value found in the EP for PMTU ususually
2078 		 * about 10 minutes.
2079 		 */
2080 		if ((stcb == NULL) || (inp == NULL)) {
2081 			return;
2082 		}
2083 		if (net == NULL) {
2084 			return;
2085 		}
2086 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2087 		tmr = &net->pmtu_timer;
2088 		break;
2089 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2090 		/* Here we use the RTO of the destination */
2091 		if ((stcb == NULL) || (net == NULL)) {
2092 			return;
2093 		}
2094 		if (net->RTO == 0) {
2095 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2096 		} else {
2097 			to_ticks = MSEC_TO_TICKS(net->RTO);
2098 		}
2099 		tmr = &net->rxt_timer;
2100 		break;
2101 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2102 		/*
2103 		 * Here we use the endpoints shutdown guard timer usually
2104 		 * about 3 minutes.
2105 		 */
2106 		if ((inp == NULL) || (stcb == NULL)) {
2107 			return;
2108 		}
2109 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2110 		tmr = &stcb->asoc.shut_guard_timer;
2111 		break;
2112 	case SCTP_TIMER_TYPE_STRRESET:
2113 		/*
2114 		 * Here the timer comes from the stcb but its value is from
2115 		 * the net's RTO.
2116 		 */
2117 		if ((stcb == NULL) || (net == NULL)) {
2118 			return;
2119 		}
2120 		if (net->RTO == 0) {
2121 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2122 		} else {
2123 			to_ticks = MSEC_TO_TICKS(net->RTO);
2124 		}
2125 		tmr = &stcb->asoc.strreset_timer;
2126 		break;
2127 
2128 	case SCTP_TIMER_TYPE_EARLYFR:
2129 		{
2130 			unsigned int msec;
2131 
2132 			if ((stcb == NULL) || (net == NULL)) {
2133 				return;
2134 			}
2135 			if (net->flight_size > net->cwnd) {
2136 				/* no need to start */
2137 				return;
2138 			}
2139 			SCTP_STAT_INCR(sctps_earlyfrstart);
2140 			if (net->lastsa == 0) {
2141 				/* Hmm no rtt estimate yet? */
2142 				msec = stcb->asoc.initial_rto >> 2;
2143 			} else {
2144 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2145 			}
2146 			if (msec < sctp_early_fr_msec) {
2147 				msec = sctp_early_fr_msec;
2148 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2149 					msec = SCTP_MINFR_MSEC_FLOOR;
2150 				}
2151 			}
2152 			to_ticks = MSEC_TO_TICKS(msec);
2153 			tmr = &net->fr_timer;
2154 		}
2155 		break;
2156 	case SCTP_TIMER_TYPE_ASCONF:
2157 		/*
2158 		 * Here the timer comes from the stcb but its value is from
2159 		 * the net's RTO.
2160 		 */
2161 		if ((stcb == NULL) || (net == NULL)) {
2162 			return;
2163 		}
2164 		if (net->RTO == 0) {
2165 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2166 		} else {
2167 			to_ticks = MSEC_TO_TICKS(net->RTO);
2168 		}
2169 		tmr = &stcb->asoc.asconf_timer;
2170 		break;
2171 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2172 		if ((stcb == NULL) || (net != NULL)) {
2173 			return;
2174 		}
2175 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2176 		tmr = &stcb->asoc.delete_prim_timer;
2177 		break;
2178 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2179 		if (stcb == NULL) {
2180 			return;
2181 		}
2182 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2183 			/*
2184 			 * Really an error since stcb is NOT set to
2185 			 * autoclose
2186 			 */
2187 			return;
2188 		}
2189 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2190 		tmr = &stcb->asoc.autoclose_timer;
2191 		break;
2192 	default:
2193 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2194 		    __FUNCTION__, t_type);
2195 		return;
2196 		break;
2197 	};
2198 	if ((to_ticks <= 0) || (tmr == NULL)) {
2199 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2200 		    __FUNCTION__, t_type, to_ticks, tmr);
2201 		return;
2202 	}
2203 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2204 		/*
2205 		 * we do NOT allow you to have it already running. if it is
2206 		 * we leave the current one up unchanged
2207 		 */
2208 		return;
2209 	}
2210 	/* At this point we can proceed */
2211 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2212 		stcb->asoc.num_send_timers_up++;
2213 	}
2214 	tmr->stopped_from = 0;
2215 	tmr->type = t_type;
2216 	tmr->ep = (void *)inp;
2217 	tmr->tcb = (void *)stcb;
2218 	tmr->net = (void *)net;
2219 	tmr->self = (void *)tmr;
2220 	tmr->ticks = sctp_get_tick_count();
2221 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2222 	return;
2223 }
2224 
/*
 * Stop the timer of type 't_type' associated with the given endpoint /
 * association / destination.  Each switch case only selects which
 * sctp_timer structure holds that timer type; the actual stop happens at
 * the bottom.  'from' records the caller's location in tmr->stopped_from
 * for debugging.  The TCB lock must be held whenever an stcb is supplied.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may be stopped without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &sctppcbinfo.addr_wq_timer;
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			struct sctp_iterator *it;

			/* for the iterator, 'inp' actually carries the iterator */
			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the per-association count of running SEND timers accurate. */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2396 
2397 #ifdef SCTP_USE_ADLER32
2398 static uint32_t
2399 update_adler32(uint32_t adler, uint8_t * buf, int32_t len)
2400 {
2401 	uint32_t s1 = adler & 0xffff;
2402 	uint32_t s2 = (adler >> 16) & 0xffff;
2403 	int n;
2404 
2405 	for (n = 0; n < len; n++, buf++) {
2406 		/* s1 = (s1 + buf[n]) % BASE */
2407 		/* first we add */
2408 		s1 = (s1 + *buf);
2409 		/*
2410 		 * now if we need to, we do a mod by subtracting. It seems a
2411 		 * bit faster since I really will only ever do one subtract
2412 		 * at the MOST, since buf[n] is a max of 255.
2413 		 */
2414 		if (s1 >= SCTP_ADLER32_BASE) {
2415 			s1 -= SCTP_ADLER32_BASE;
2416 		}
2417 		/* s2 = (s2 + s1) % BASE */
2418 		/* first we add */
2419 		s2 = (s2 + s1);
2420 		/*
2421 		 * again, it is more efficent (it seems) to subtract since
2422 		 * the most s2 will ever be is (BASE-1 + BASE-1) in the
2423 		 * worse case. This would then be (2 * BASE) - 2, which will
2424 		 * still only do one subtract. On Intel this is much better
2425 		 * to do this way and avoid the divide. Have not -pg'd on
2426 		 * sparc.
2427 		 */
2428 		if (s2 >= SCTP_ADLER32_BASE) {
2429 			s2 -= SCTP_ADLER32_BASE;
2430 		}
2431 	}
2432 	/* Return the adler32 of the bytes buf[0..len-1] */
2433 	return ((s2 << 16) + s1);
2434 }
2435 
2436 #endif
2437 
2438 
2439 uint32_t
2440 sctp_calculate_len(struct mbuf *m)
2441 {
2442 	uint32_t tlen = 0;
2443 	struct mbuf *at;
2444 
2445 	at = m;
2446 	while (at) {
2447 		tlen += SCTP_BUF_LEN(at);
2448 		at = SCTP_BUF_NEXT(at);
2449 	}
2450 	return (tlen);
2451 }
2452 
2453 #if defined(SCTP_WITH_NO_CSUM)
2454 
2455 uint32_t
2456 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2457 {
2458 	/*
2459 	 * given a mbuf chain with a packetheader offset by 'offset'
2460 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2461 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2462 	 * has a side bonus as it will calculate the total length of the
2463 	 * mbuf chain. Note: if offset is greater than the total mbuf
2464 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2465 	 */
2466 	if (pktlen == NULL)
2467 		return (0);
2468 	*pktlen = sctp_calculate_len(m);
2469 	return (0);
2470 }
2471 
2472 #elif defined(SCTP_USE_INCHKSUM)
2473 
2474 #include <machine/in_cksum.h>
2475 
2476 uint32_t
2477 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2478 {
2479 	/*
2480 	 * given a mbuf chain with a packetheader offset by 'offset'
2481 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2482 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2483 	 * has a side bonus as it will calculate the total length of the
2484 	 * mbuf chain. Note: if offset is greater than the total mbuf
2485 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2486 	 */
2487 	int32_t tlen = 0;
2488 	struct mbuf *at;
2489 	uint32_t the_sum, retsum;
2490 
2491 	at = m;
2492 	while (at) {
2493 		tlen += SCTP_BUF_LEN(at);
2494 		at = SCTP_BUF_NEXT(at);
2495 	}
2496 	the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset));
2497 	if (pktlen != NULL)
2498 		*pktlen = (tlen - offset);
2499 	retsum = htons(the_sum);
2500 	return (the_sum);
2501 }
2502 
2503 #else
2504 
uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through the chain
	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
	 * has a side bonus as it will calculate the total length of the
	 * mbuf chain. Note: if offset is greater than the total mbuf
	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
	 */
	int32_t tlen = 0;

	/* Seed: 1 for Adler-32 (per RFC 1950), all-ones for CRC-32c. */
#ifdef SCTP_USE_ADLER32
	uint32_t base = 1L;

#else
	uint32_t base = 0xffffffff;

#endif
	struct mbuf *at;

	at = m;
	/* find the correct mbuf and offset into mbuf */
	while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) {
		offset -= SCTP_BUF_LEN(at);	/* update remaining offset
						 * left */
		at = SCTP_BUF_NEXT(at);
	}
	/* fold each mbuf's payload (past 'offset' in the first one) in turn */
	while (at != NULL) {
		if ((SCTP_BUF_LEN(at) - offset) > 0) {
#ifdef SCTP_USE_ADLER32
			base = update_adler32(base,
			    (unsigned char *)(SCTP_BUF_AT(at, offset)),
			    (unsigned int)(SCTP_BUF_LEN(at) - offset));
#else
			if ((SCTP_BUF_LEN(at) - offset) < 4) {
				/* Use old method if less than 4 bytes */
				base = old_update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			} else {
				base = update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			}
#endif
			tlen += SCTP_BUF_LEN(at) - offset;
			/* we only offset once into the first mbuf */
		}
		/* after the first mbuf, any remaining offset is consumed */
		if (offset) {
			if (offset < (uint32_t) SCTP_BUF_LEN(at))
				offset = 0;
			else
				offset -= SCTP_BUF_LEN(at);
		}
		at = SCTP_BUF_NEXT(at);
	}
	if (pktlen != NULL) {
		*pktlen = tlen;
	}
	/* finalize to the on-wire representation */
#ifdef SCTP_USE_ADLER32
	/* Adler32 */
	base = htonl(base);
#else
	/* CRC-32c */
	base = sctp_csum_finalize(base);
#endif
	return (base);
}
2575 
2576 
2577 #endif
2578 
2579 void
2580 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2581     struct sctp_association *asoc, uint32_t mtu)
2582 {
2583 	/*
2584 	 * Reset the P-MTU size on this association, this involves changing
2585 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2586 	 * allow the DF flag to be cleared.
2587 	 */
2588 	struct sctp_tmit_chunk *chk;
2589 	unsigned int eff_mtu, ovh;
2590 
2591 #ifdef SCTP_PRINT_FOR_B_AND_M
2592 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2593 	    inp, asoc, mtu);
2594 #endif
2595 	asoc->smallest_mtu = mtu;
2596 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2597 		ovh = SCTP_MIN_OVERHEAD;
2598 	} else {
2599 		ovh = SCTP_MIN_V4_OVERHEAD;
2600 	}
2601 	eff_mtu = mtu - ovh;
2602 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2603 
2604 		if (chk->send_size > eff_mtu) {
2605 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2606 		}
2607 	}
2608 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2609 		if (chk->send_size > eff_mtu) {
2610 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2611 		}
2612 	}
2613 }
2614 
2615 
2616 /*
2617  * given an association and starting time of the current RTT period return
2618  * RTO in number of msecs net should point to the current network
2619  */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 * 'safe' selects whether 'told' must first be copied to an
	 * aligned local (sctp_align_unsafe_makecopy, for strict-alignment
	 * platforms such as sparc64) or may be used in place.
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value, in milliseconds, as now - old */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	o_calctime = calc_time;
	/*
	 * this is Van Jacobson's integer version: lastsa holds SRTT scaled
	 * by 2^SCTP_RTT_SHIFT, lastsv holds RTTVAR scaled by
	 * 2^SCTP_RTT_VAR_SHIFT, updated in place without floating point.
	 */
	if (net->RTO_measured) {
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (sctp_logging_level & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			/* never let the variance collapse to zero */
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (sctp_logging_level & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR (both unscaled here) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	/* latch "satellite network" once the RTO crosses the threshold */
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2745 
2746 /*
2747  * return a pointer to a contiguous piece of data from the given mbuf chain
2748  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2749  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2750  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2751  */
2752 caddr_t
2753 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2754 {
2755 	uint32_t count;
2756 	uint8_t *ptr;
2757 
2758 	ptr = in_ptr;
2759 	if ((off < 0) || (len <= 0))
2760 		return (NULL);
2761 
2762 	/* find the desired start location */
2763 	while ((m != NULL) && (off > 0)) {
2764 		if (off < SCTP_BUF_LEN(m))
2765 			break;
2766 		off -= SCTP_BUF_LEN(m);
2767 		m = SCTP_BUF_NEXT(m);
2768 	}
2769 	if (m == NULL)
2770 		return (NULL);
2771 
2772 	/* is the current mbuf large enough (eg. contiguous)? */
2773 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2774 		return (mtod(m, caddr_t)+off);
2775 	} else {
2776 		/* else, it spans more than one mbuf, so save a temp copy... */
2777 		while ((m != NULL) && (len > 0)) {
2778 			count = min(SCTP_BUF_LEN(m) - off, len);
2779 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2780 			len -= count;
2781 			ptr += count;
2782 			off = 0;
2783 			m = SCTP_BUF_NEXT(m);
2784 		}
2785 		if ((m == NULL) && (len > 0))
2786 			return (NULL);
2787 		else
2788 			return ((caddr_t)in_ptr);
2789 	}
2790 }
2791 
2792 
2793 
/*
 * Typed convenience wrapper: fetch the parameter header at 'offset'
 * in the chain via sctp_m_getptr(), using 'pull' as the bounce buffer
 * when the header spans mbufs.
 */
struct sctp_paramhdr *
sctp_get_next_param(struct mbuf *m,
    int offset,
    struct sctp_paramhdr *pull,
    int pull_limit)
{
	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset,
	    pull_limit, (uint8_t *) pull));
}
2804 
2805 
2806 int
2807 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2808 {
2809 	/*
2810 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2811 	 * padlen is > 3 this routine will fail.
2812 	 */
2813 	uint8_t *dp;
2814 	int i;
2815 
2816 	if (padlen > 3) {
2817 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2818 		return (ENOBUFS);
2819 	}
2820 	if (M_TRAILINGSPACE(m)) {
2821 		/*
2822 		 * The easy way. We hope the majority of the time we hit
2823 		 * here :)
2824 		 */
2825 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2826 		SCTP_BUF_LEN(m) += padlen;
2827 	} else {
2828 		/* Hard way we must grow the mbuf */
2829 		struct mbuf *tmp;
2830 
2831 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2832 		if (tmp == NULL) {
2833 			/* Out of space GAK! we are in big trouble. */
2834 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2835 			return (ENOSPC);
2836 		}
2837 		/* setup and insert in middle */
2838 		SCTP_BUF_NEXT(tmp) = SCTP_BUF_NEXT(m);
2839 		SCTP_BUF_LEN(tmp) = padlen;
2840 		SCTP_BUF_NEXT(m) = tmp;
2841 		dp = mtod(tmp, uint8_t *);
2842 	}
2843 	/* zero out the pad */
2844 	for (i = 0; i < padlen; i++) {
2845 		*dp = 0;
2846 		dp++;
2847 	}
2848 	return (0);
2849 }
2850 
2851 int
2852 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2853 {
2854 	/* find the last mbuf in chain and pad it */
2855 	struct mbuf *m_at;
2856 
2857 	m_at = m;
2858 	if (last_mbuf) {
2859 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2860 	} else {
2861 		while (m_at) {
2862 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2863 				return (sctp_add_pad_tombuf(m_at, padval));
2864 			}
2865 			m_at = SCTP_BUF_NEXT(m_at);
2866 		}
2867 	}
2868 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2869 	return (EFAULT);
2870 }
2871 
/*
 * Debug counter: bumped each time sctp_notify_assoc_change() wakes
 * sleepers on a TCP-model/connected socket after COMM_LOST or
 * CANT_STR_ASSOC.
 */
int sctp_asoc_change_wake = 0;
2873 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification ('event', with 'error') to
 * the socket's receive queue, provided the association and socket are
 * still alive and the user enabled SCTP_PCB_FLAGS_RECVASSOCEVNT.  For
 * TCP-model or connected one-to-many sockets, COMM_LOST /
 * CANT_STR_ASSOC additionally sets so_error and wakes any sleepers.
 * 'so_locked' tells the Apple/lock-testing builds whether the caller
 * already holds the socket lock.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * First if we are are going down dump everything we can to the
	 * socket rcv queue.
	 */

	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
	    ) {
		/* If the socket is gone we are out of here */
		return;
	}
	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			/* never got past COOKIE_WAIT: report refusal */
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * drop the TCB lock to take the socket lock in the
			 * right order; hold a refcount so the stcb cannot
			 * be freed underneath us
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* build the sctp_assoc_change notification in the mbuf */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* same lock-order dance as above */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3003 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for peer address 'sa'
 * entering 'state' (with 'error') to the socket's receive queue, if the
 * user enabled SCTP_PCB_FLAGS_RECVPADDREVNT.  For link-local IPv6
 * addresses the scope id is normalized for user consumption.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	if (sa->sa_family == AF_INET) {
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
	} else {
		struct sockaddr_in6 *sin6;

		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
			if (sin6->sin6_scope_id == 0) {
				/* recover scope_id for user */
				(void)sa6_recoverscope(sin6);
			} else {
				/* clear embedded scope_id for user */
				in6_clearscope(&sin6->sin6_addr);
			}
		}
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3066 
3067 
/*
 * Deliver an SCTP_SEND_FAILED notification for a chunk that was on the
 * sent/send queue.  The notification mbuf carries only the
 * sctp_send_failed header; the chunk's original payload mbuf chain is
 * stolen from 'chk' and appended via SCTP_BUF_NEXT so the user gets the
 * (approximate) failed data back.  No-op if the event is disabled.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
		/* event not enabled */
		return;

	/* ssf_length reported to the user covers header + failed data */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* chain the original payload behind the notification header */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* freeing m_notify also frees the stolen payload chain */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3136 
3137 
/*
 * Companion to sctp_notify_send_failed() for data still sitting on a
 * stream output queue (struct sctp_stream_queue_pending) rather than a
 * transmit chunk.  Steals sp->data and chains it behind the
 * SCTP_SEND_FAILED header so the user recovers the failed payload.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
		/* event not enabled */
		return;

	/* ssf_length reported to the user covers header + failed data */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	ssf->ssf_info.sinfo_flags = sp->sinfo_flags;
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* chain the original payload behind the notification header */
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* freeing m_notify also frees the stolen payload chain */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3206 
3207 
3208 
3209 static void
3210 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3211     uint32_t error)
3212 {
3213 	struct mbuf *m_notify;
3214 	struct sctp_adaptation_event *sai;
3215 	struct sctp_queued_to_read *control;
3216 
3217 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)))
3218 		/* event not enabled */
3219 		return;
3220 
3221 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3222 	if (m_notify == NULL)
3223 		/* no space left */
3224 		return;
3225 	SCTP_BUF_LEN(m_notify) = 0;
3226 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3227 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3228 	sai->sai_flags = 0;
3229 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3230 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3231 	sai->sai_assoc_id = sctp_get_associd(stcb);
3232 
3233 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3234 	SCTP_BUF_NEXT(m_notify) = NULL;
3235 
3236 	/* append to socket */
3237 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3238 	    0, 0, 0, 0, 0, 0,
3239 	    m_notify);
3240 	if (control == NULL) {
3241 		/* no memory */
3242 		sctp_m_freem(m_notify);
3243 		return;
3244 	}
3245 	control->length = SCTP_BUF_LEN(m_notify);
3246 	control->spec_flags = M_NOTIFICATION;
3247 	/* not that we need this */
3248 	control->tail_mbuf = m_notify;
3249 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3250 	    control,
3251 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3252 }
3253 
3254 /* This always must be called with the read-queue LOCKED in the INP */
3255 void
3256 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3257     int nolock, uint32_t val)
3258 {
3259 	struct mbuf *m_notify;
3260 	struct sctp_pdapi_event *pdapi;
3261 	struct sctp_queued_to_read *control;
3262 	struct sockbuf *sb;
3263 
3264 	if ((stcb == NULL) || (stcb->sctp_socket == NULL) ||
3265 	    sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
3266 		/* event not enabled */
3267 		return;
3268 
3269 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3270 	if (m_notify == NULL)
3271 		/* no space left */
3272 		return;
3273 	SCTP_BUF_LEN(m_notify) = 0;
3274 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3275 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3276 	pdapi->pdapi_flags = 0;
3277 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3278 	pdapi->pdapi_indication = error;
3279 	pdapi->pdapi_stream = (val >> 16);
3280 	pdapi->pdapi_seq = (val & 0x0000ffff);
3281 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3282 
3283 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3284 	SCTP_BUF_NEXT(m_notify) = NULL;
3285 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3286 	    0, 0, 0, 0, 0, 0,
3287 	    m_notify);
3288 	if (control == NULL) {
3289 		/* no memory */
3290 		sctp_m_freem(m_notify);
3291 		return;
3292 	}
3293 	control->spec_flags = M_NOTIFICATION;
3294 	control->length = SCTP_BUF_LEN(m_notify);
3295 	/* not that we need this */
3296 	control->tail_mbuf = m_notify;
3297 	control->held_length = 0;
3298 	control->length = 0;
3299 	if (nolock == 0) {
3300 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
3301 	}
3302 	sb = &stcb->sctp_socket->so_rcv;
3303 	if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
3304 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3305 	}
3306 	sctp_sballoc(stcb, sb, m_notify);
3307 	if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
3308 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3309 	}
3310 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3311 	control->end_added = 1;
3312 	if (stcb->asoc.control_pdapi)
3313 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3314 	else {
3315 		/* we really should not see this case */
3316 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3317 	}
3318 	if (nolock == 0) {
3319 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
3320 	}
3321 	if (stcb->sctp_ep && stcb->sctp_socket) {
3322 		/* This should always be the case */
3323 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3324 	}
3325 }
3326 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification.  For TCP-model (and
 * TCP-pool) sockets the socket is first marked as unable to send more
 * data, which also wakes any writer blocked on it.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock-order dance: the socket lock must be taken before
		 * the TCB lock, so drop and re-take the TCB lock while
		 * holding a refcount to keep the TCB from being freed.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket went away while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3397 
/*
 * Deliver an SCTP_STREAM_RESET_EVENT notification listing the affected
 * streams.  'flag' carries direction/result bits (e.g.
 * SCTP_STRRESET_OUTBOUND_STR, SCTP_STRRESET_FAILED); number_entries == 0
 * means the reset applies to all streams.
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if (stcb == NULL) {
		return;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	/* variable-length event: header plus one uint16_t per stream */
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	if (number_entries == 0) {
		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
	} else {
		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
	}
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		/* list arrives in network byte order; convert for the user */
		for (i = 0; i < number_entries; i++) {
			strreset->strreset_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3465 
3466 
3467 void
3468 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3469     uint32_t error, void *data, int so_locked
3470 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3471     SCTP_UNUSED
3472 #endif
3473 )
3474 {
3475 	if (stcb == NULL) {
3476 		/* unlikely but */
3477 		return;
3478 	}
3479 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3480 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3481 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
3482 	    ) {
3483 		/* No notifications up when we are in a no socket state */
3484 		return;
3485 	}
3486 	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3487 		/* Can't send up to a closed socket any notifications */
3488 		return;
3489 	}
3490 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3491 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3492 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3493 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3494 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3495 			/* Don't report these in front states */
3496 			return;
3497 		}
3498 	}
3499 	switch (notification) {
3500 	case SCTP_NOTIFY_ASSOC_UP:
3501 		if (stcb->asoc.assoc_up_sent == 0) {
3502 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3503 			stcb->asoc.assoc_up_sent = 1;
3504 		}
3505 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3506 			sctp_notify_adaptation_layer(stcb, error);
3507 		}
3508 		break;
3509 	case SCTP_NOTIFY_ASSOC_DOWN:
3510 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3511 		break;
3512 	case SCTP_NOTIFY_INTERFACE_DOWN:
3513 		{
3514 			struct sctp_nets *net;
3515 
3516 			net = (struct sctp_nets *)data;
3517 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3518 			    (struct sockaddr *)&net->ro._l_addr, error);
3519 			break;
3520 		}
3521 	case SCTP_NOTIFY_INTERFACE_UP:
3522 		{
3523 			struct sctp_nets *net;
3524 
3525 			net = (struct sctp_nets *)data;
3526 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3527 			    (struct sockaddr *)&net->ro._l_addr, error);
3528 			break;
3529 		}
3530 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3531 		{
3532 			struct sctp_nets *net;
3533 
3534 			net = (struct sctp_nets *)data;
3535 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3536 			    (struct sockaddr *)&net->ro._l_addr, error);
3537 			break;
3538 		}
3539 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3540 		sctp_notify_send_failed2(stcb, error,
3541 		    (struct sctp_stream_queue_pending *)data, so_locked);
3542 		break;
3543 	case SCTP_NOTIFY_DG_FAIL:
3544 		sctp_notify_send_failed(stcb, error,
3545 		    (struct sctp_tmit_chunk *)data, so_locked);
3546 		break;
3547 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3548 		{
3549 			uint32_t val;
3550 
3551 			val = *((uint32_t *) data);
3552 
3553 			sctp_notify_partial_delivery_indication(stcb, error, 0, val);
3554 		}
3555 		break;
3556 	case SCTP_NOTIFY_STRDATA_ERR:
3557 		break;
3558 	case SCTP_NOTIFY_ASSOC_ABORTED:
3559 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3560 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3561 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3562 		} else {
3563 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3564 		}
3565 		break;
3566 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3567 		break;
3568 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3569 		break;
3570 	case SCTP_NOTIFY_ASSOC_RESTART:
3571 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3572 		break;
3573 	case SCTP_NOTIFY_HB_RESP:
3574 		break;
3575 	case SCTP_NOTIFY_STR_RESET_SEND:
3576 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3577 		break;
3578 	case SCTP_NOTIFY_STR_RESET_RECV:
3579 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3580 		break;
3581 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3582 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3583 		break;
3584 
3585 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3586 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3587 		break;
3588 
3589 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3590 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3591 		    error);
3592 		break;
3593 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3594 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3595 		    error);
3596 		break;
3597 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3598 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3599 		    error);
3600 		break;
3601 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3602 		break;
3603 	case SCTP_NOTIFY_ASCONF_FAILED:
3604 		break;
3605 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3606 		sctp_notify_shutdown_event(stcb);
3607 		break;
3608 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3609 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3610 		    (uint16_t) (uintptr_t) data);
3611 		break;
3612 #if 0
3613 	case SCTP_NOTIFY_AUTH_KEY_CONFLICT:
3614 		sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT,
3615 		    error, (uint16_t) (uintptr_t) data);
3616 		break;
3617 #endif				/* not yet? remove? */
3618 
3619 
3620 	default:
3621 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3622 		    __FUNCTION__, notification, notification);
3623 		break;
3624 	}			/* end switch */
3625 }
3626 
3627 void
3628 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3629 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3630     SCTP_UNUSED
3631 #endif
3632 )
3633 {
3634 	struct sctp_association *asoc;
3635 	struct sctp_stream_out *outs;
3636 	struct sctp_tmit_chunk *chk;
3637 	struct sctp_stream_queue_pending *sp;
3638 	int i;
3639 
3640 	asoc = &stcb->asoc;
3641 
3642 	if (stcb == NULL) {
3643 		return;
3644 	}
3645 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3646 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3647 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3648 		return;
3649 	}
3650 	/* now through all the gunk freeing chunks */
3651 	if (holds_lock == 0) {
3652 		SCTP_TCB_SEND_LOCK(stcb);
3653 	}
3654 	/* sent queue SHOULD be empty */
3655 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3656 		chk = TAILQ_FIRST(&asoc->sent_queue);
3657 		while (chk) {
3658 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3659 			asoc->sent_queue_cnt--;
3660 			if (chk->data) {
3661 				/*
3662 				 * trim off the sctp chunk header(it should
3663 				 * be there)
3664 				 */
3665 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3666 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3667 					sctp_mbuf_crush(chk->data);
3668 					chk->send_size -= sizeof(struct sctp_data_chunk);
3669 				}
3670 			}
3671 			sctp_free_bufspace(stcb, asoc, chk, 1);
3672 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3673 			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3674 			if (chk->data) {
3675 				sctp_m_freem(chk->data);
3676 				chk->data = NULL;
3677 			}
3678 			sctp_free_a_chunk(stcb, chk);
3679 			/* sa_ignore FREED_MEMORY */
3680 			chk = TAILQ_FIRST(&asoc->sent_queue);
3681 		}
3682 	}
3683 	/* pending send queue SHOULD be empty */
3684 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3685 		chk = TAILQ_FIRST(&asoc->send_queue);
3686 		while (chk) {
3687 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3688 			asoc->send_queue_cnt--;
3689 			if (chk->data) {
3690 				/*
3691 				 * trim off the sctp chunk header(it should
3692 				 * be there)
3693 				 */
3694 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3695 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3696 					sctp_mbuf_crush(chk->data);
3697 					chk->send_size -= sizeof(struct sctp_data_chunk);
3698 				}
3699 			}
3700 			sctp_free_bufspace(stcb, asoc, chk, 1);
3701 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3702 			if (chk->data) {
3703 				sctp_m_freem(chk->data);
3704 				chk->data = NULL;
3705 			}
3706 			sctp_free_a_chunk(stcb, chk);
3707 			/* sa_ignore FREED_MEMORY */
3708 			chk = TAILQ_FIRST(&asoc->send_queue);
3709 		}
3710 	}
3711 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3712 		/* For each stream */
3713 		outs = &stcb->asoc.strmout[i];
3714 		/* clean up any sends there */
3715 		stcb->asoc.locked_on_sending = NULL;
3716 		sp = TAILQ_FIRST(&outs->outqueue);
3717 		while (sp) {
3718 			stcb->asoc.stream_queue_cnt--;
3719 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3720 			sctp_free_spbufspace(stcb, asoc, sp);
3721 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3722 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3723 			if (sp->data) {
3724 				sctp_m_freem(sp->data);
3725 				sp->data = NULL;
3726 			}
3727 			if (sp->net)
3728 				sctp_free_remote_addr(sp->net);
3729 			sp->net = NULL;
3730 			/* Free the chunk */
3731 			sctp_free_a_strmoq(stcb, sp);
3732 			/* sa_ignore FREED_MEMORY */
3733 			sp = TAILQ_FIRST(&outs->outqueue);
3734 		}
3735 	}
3736 
3737 	if (holds_lock == 0) {
3738 		SCTP_TCB_SEND_UNLOCK(stcb);
3739 	}
3740 }
3741 
3742 void
3743 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3744 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3745     SCTP_UNUSED
3746 #endif
3747 )
3748 {
3749 
3750 	if (stcb == NULL) {
3751 		return;
3752 	}
3753 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3754 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3755 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3756 		return;
3757 	}
3758 	/* Tell them we lost the asoc */
3759 	sctp_report_all_outbound(stcb, 1, so_locked);
3760 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3761 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3762 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3763 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3764 	}
3765 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3766 }
3767 
/*
 * Abort an association in response to an inbound packet: notify the
 * ULP (if a TCB exists), send an ABORT back to the peer using the
 * peer's vtag, and free the TCB.  With no TCB, a GONE inpcb with no
 * remaining associations is freed instead.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Take the socket lock before freeing the assoc; drop and
		 * re-take the TCB lock to respect lock ordering, holding a
		 * refcount so the TCB cannot vanish in between.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		/* no TCB: reap the inpcb if its socket is already gone */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3813 
3814 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the association's inbound and outbound TSN track
 * logs (circular buffers of SCTP_TSN_LOG_SIZE entries).  The body is
 * compiled only under NOSIY_PRINTS; otherwise this is an empty stub.
 *
 * NOTE(review): "NOSIY_PRINTS" looks like a typo of NOISY_PRINTS;
 * left as-is since it is the name actually used (verify nothing
 * defines either spelling before renaming).
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* wrapped log: print the older half first (tsn_in_at..end) */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3875 
3876 #endif
3877 
/*
 * Abort an existing association from our side: notify the ULP (unless
 * the socket is already gone), send an ABORT chunk to the peer, update
 * statistics, and free the association.  With stcb == NULL only the
 * GONE-inpcb cleanup is attempted.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* NOTE(review): vtag is set but not otherwise used here */
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: socket lock must be taken before the TCB
	 * lock; hold a refcount across the unlock/relock so the TCB
	 * cannot be freed from under us.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3940 
/*
 * Handle an "out of the blue" packet: one for which no association was
 * found.  Scans the chunks in the packet; chunk types that must never
 * be answered (COOKIE-ECHO, PACKET-DROPPED, ABORT, SHUTDOWN-COMPLETE)
 * cause a silent return, a SHUTDOWN-ACK is answered with a
 * SHUTDOWN-COMPLETE, and anything else (including a corrupt chunk
 * length) falls through to sending an ABORT.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/*
	 * If the endpoint is marked gone and has no associations left,
	 * finish freeing it now.
	 */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* corrupt chunk length: break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
			return;
		default:
			/* keep scanning the remaining chunks */
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/* nothing told us to stay silent: answer with an ABORT */
	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id);
}
3992 
3993 /*
3994  * check the inbound datagram to make sure there is not an abort inside it,
3995  * if there is return 1, else return 0.
3996  */
3997 int
3998 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3999 {
4000 	struct sctp_chunkhdr *ch;
4001 	struct sctp_init_chunk *init_chk, chunk_buf;
4002 	int offset;
4003 	unsigned int chk_length;
4004 
4005 	offset = iphlen + sizeof(struct sctphdr);
4006 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4007 	    (uint8_t *) & chunk_buf);
4008 	while (ch != NULL) {
4009 		chk_length = ntohs(ch->chunk_length);
4010 		if (chk_length < sizeof(*ch)) {
4011 			/* packet is probably corrupt */
4012 			break;
4013 		}
4014 		/* we seem to be ok, is it an abort? */
4015 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4016 			/* yep, tell them */
4017 			return (1);
4018 		}
4019 		if (ch->chunk_type == SCTP_INITIATION) {
4020 			/* need to update the Vtag */
4021 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4022 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4023 			if (init_chk != NULL) {
4024 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4025 			}
4026 		}
4027 		/* Nope, move to the next chunk */
4028 		offset += SCTP_SIZE32(chk_length);
4029 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4030 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4031 	}
4032 	return (0);
4033 }
4034 
4035 /*
4036  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4037  * set (i.e. it's 0) so, create this function to compare link local scopes
4038  */
4039 uint32_t
4040 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4041 {
4042 	struct sockaddr_in6 a, b;
4043 
4044 	/* save copies */
4045 	a = *addr1;
4046 	b = *addr2;
4047 
4048 	if (a.sin6_scope_id == 0)
4049 		if (sa6_recoverscope(&a)) {
4050 			/* can't get scope, so can't match */
4051 			return (0);
4052 		}
4053 	if (b.sin6_scope_id == 0)
4054 		if (sa6_recoverscope(&b)) {
4055 			/* can't get scope, so can't match */
4056 			return (0);
4057 		}
4058 	if (a.sin6_scope_id != b.sin6_scope_id)
4059 		return (0);
4060 
4061 	return (1);
4062 }
4063 
4064 /*
4065  * returns a sockaddr_in6 with embedded scope recovered and removed
4066  */
4067 struct sockaddr_in6 *
4068 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4069 {
4070 	/* check and strip embedded scope junk */
4071 	if (addr->sin6_family == AF_INET6) {
4072 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4073 			if (addr->sin6_scope_id == 0) {
4074 				*store = *addr;
4075 				if (!sa6_recoverscope(store)) {
4076 					/* use the recovered scope */
4077 					addr = store;
4078 				}
4079 			} else {
4080 				/* else, return the original "to" addr */
4081 				in6_clearscope(&addr->sin6_addr);
4082 			}
4083 		}
4084 	}
4085 	return (addr);
4086 }
4087 
4088 /*
4089  * are the two addresses the same?  currently a "scopeless" check returns: 1
4090  * if same, 0 if not
4091  */
4092 int
4093 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4094 {
4095 
4096 	/* must be valid */
4097 	if (sa1 == NULL || sa2 == NULL)
4098 		return (0);
4099 
4100 	/* must be the same family */
4101 	if (sa1->sa_family != sa2->sa_family)
4102 		return (0);
4103 
4104 	if (sa1->sa_family == AF_INET6) {
4105 		/* IPv6 addresses */
4106 		struct sockaddr_in6 *sin6_1, *sin6_2;
4107 
4108 		sin6_1 = (struct sockaddr_in6 *)sa1;
4109 		sin6_2 = (struct sockaddr_in6 *)sa2;
4110 		return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
4111 		    &sin6_2->sin6_addr));
4112 	} else if (sa1->sa_family == AF_INET) {
4113 		/* IPv4 addresses */
4114 		struct sockaddr_in *sin_1, *sin_2;
4115 
4116 		sin_1 = (struct sockaddr_in *)sa1;
4117 		sin_2 = (struct sockaddr_in *)sa2;
4118 		return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4119 	} else {
4120 		/* we don't do these... */
4121 		return (0);
4122 	}
4123 }
4124 
/* Pretty-print an IPv4 or IPv6 sockaddr (address, port, scope). */
void
sctp_print_address(struct sockaddr *sa)
{
	char ip6buf[INET6_ADDRSTRLEN];

	ip6buf[0] = 0;
	switch (sa->sa_family) {
	case AF_INET6:{
			struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sa;

			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
			    ntohs(sin6->sin6_port),
			    sin6->sin6_scope_id);
			break;
		}
	case AF_INET:{
			struct sockaddr_in *sin = (struct sockaddr_in *)sa;
			unsigned char *b = (unsigned char *)&sin->sin_addr;

			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    b[0], b[1], b[2], b[3], ntohs(sin->sin_port));
			break;
		}
	default:
		SCTP_PRINTF("?\n");
		break;
	}
}
4151 
4152 void
4153 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4154 {
4155 	if (iph->ip_v == IPVERSION) {
4156 		struct sockaddr_in lsa, fsa;
4157 
4158 		bzero(&lsa, sizeof(lsa));
4159 		lsa.sin_len = sizeof(lsa);
4160 		lsa.sin_family = AF_INET;
4161 		lsa.sin_addr = iph->ip_src;
4162 		lsa.sin_port = sh->src_port;
4163 		bzero(&fsa, sizeof(fsa));
4164 		fsa.sin_len = sizeof(fsa);
4165 		fsa.sin_family = AF_INET;
4166 		fsa.sin_addr = iph->ip_dst;
4167 		fsa.sin_port = sh->dest_port;
4168 		SCTP_PRINTF("src: ");
4169 		sctp_print_address((struct sockaddr *)&lsa);
4170 		SCTP_PRINTF("dest: ");
4171 		sctp_print_address((struct sockaddr *)&fsa);
4172 	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
4173 		struct ip6_hdr *ip6;
4174 		struct sockaddr_in6 lsa6, fsa6;
4175 
4176 		ip6 = (struct ip6_hdr *)iph;
4177 		bzero(&lsa6, sizeof(lsa6));
4178 		lsa6.sin6_len = sizeof(lsa6);
4179 		lsa6.sin6_family = AF_INET6;
4180 		lsa6.sin6_addr = ip6->ip6_src;
4181 		lsa6.sin6_port = sh->src_port;
4182 		bzero(&fsa6, sizeof(fsa6));
4183 		fsa6.sin6_len = sizeof(fsa6);
4184 		fsa6.sin6_family = AF_INET6;
4185 		fsa6.sin6_addr = ip6->ip6_dst;
4186 		fsa6.sin6_port = sh->dest_port;
4187 		SCTP_PRINTF("src: ");
4188 		sctp_print_address((struct sockaddr *)&lsa6);
4189 		SCTP_PRINTF("dest: ");
4190 		sctp_print_address((struct sockaddr *)&fsa6);
4191 	}
4192 }
4193 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.  Each mbuf moved is
	 * un-charged from the old socket's receive buffer and charged to
	 * the new one, so select()/sb_cc stay consistent on both sockets.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* serialize against concurrent readers of the old socket buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for our target stcb */
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* un-charge every mbuf from the old rcv buffer */
			while (m) {
				if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge every mbuf to the new socket's rcv buffer */
		while (m) {
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4275 
4276 
4277 void
4278 sctp_add_to_readq(struct sctp_inpcb *inp,
4279     struct sctp_tcb *stcb,
4280     struct sctp_queued_to_read *control,
4281     struct sockbuf *sb,
4282     int end,
4283     int so_locked
4284 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4285     SCTP_UNUSED
4286 #endif
4287 )
4288 {
4289 	/*
4290 	 * Here we must place the control on the end of the socket read
4291 	 * queue AND increment sb_cc so that select will work properly on
4292 	 * read.
4293 	 */
4294 	struct mbuf *m, *prev = NULL;
4295 
4296 	if (inp == NULL) {
4297 		/* Gak, TSNH!! */
4298 #ifdef INVARIANTS
4299 		panic("Gak, inp NULL on add_to_readq");
4300 #endif
4301 		return;
4302 	}
4303 	SCTP_INP_READ_LOCK(inp);
4304 	if (!(control->spec_flags & M_NOTIFICATION)) {
4305 		atomic_add_int(&inp->total_recvs, 1);
4306 		if (!control->do_not_ref_stcb) {
4307 			atomic_add_int(&stcb->total_recvs, 1);
4308 		}
4309 	}
4310 	m = control->data;
4311 	control->held_length = 0;
4312 	control->length = 0;
4313 	while (m) {
4314 		if (SCTP_BUF_LEN(m) == 0) {
4315 			/* Skip mbufs with NO length */
4316 			if (prev == NULL) {
4317 				/* First one */
4318 				control->data = sctp_m_free(m);
4319 				m = control->data;
4320 			} else {
4321 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4322 				m = SCTP_BUF_NEXT(prev);
4323 			}
4324 			if (m == NULL) {
4325 				control->tail_mbuf = prev;;
4326 			}
4327 			continue;
4328 		}
4329 		prev = m;
4330 		if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
4331 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4332 		}
4333 		sctp_sballoc(stcb, sb, m);
4334 		if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
4335 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4336 		}
4337 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4338 		m = SCTP_BUF_NEXT(m);
4339 	}
4340 	if (prev != NULL) {
4341 		control->tail_mbuf = prev;
4342 	} else {
4343 		/* Everything got collapsed out?? */
4344 		return;
4345 	}
4346 	if (end) {
4347 		control->end_added = 1;
4348 	}
4349 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4350 	SCTP_INP_READ_UNLOCK(inp);
4351 	if (inp && inp->sctp_socket) {
4352 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4353 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4354 		} else {
4355 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4356 			struct socket *so;
4357 
4358 			so = SCTP_INP_SO(inp);
4359 			if (!so_locked) {
4360 				atomic_add_int(&stcb->asoc.refcnt, 1);
4361 				SCTP_TCB_UNLOCK(stcb);
4362 				SCTP_SOCKET_LOCK(so, 1);
4363 				SCTP_TCB_LOCK(stcb);
4364 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4365 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4366 					SCTP_SOCKET_UNLOCK(so, 1);
4367 					return;
4368 				}
4369 			}
4370 #endif
4371 			sctp_sorwakeup(inp, inp->sctp_socket);
4372 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4373 			if (!so_locked) {
4374 				SCTP_SOCKET_UNLOCK(so, 1);
4375 			}
4376 #endif
4377 		}
4378 	}
4379 }
4380 
4381 
/*
 * Append mbuf chain m to an existing read-queue entry (partial delivery
 * API in progress, or appending to the reassembly queue).  Returns 0 on
 * success, -1 if the entry is missing, already complete, or m is empty.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common error exit: drop the read lock if we took it */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/*
	 * Strip zero-length mbufs while summing the payload length and
	 * (when sb is given) charging each mbuf to the receive buffer.
	 */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			/*
			 * NOTE(review): this lock dance dereferences stcb
			 * unconditionally, yet other paths above tolerate
			 * stcb == NULL -- confirm callers on these
			 * platforms always pass a non-NULL stcb.
			 */
			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4523 
4524 
4525 
4526 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4527  *************ALTERNATE ROUTING CODE
4528  */
4529 
4530 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4531  *************ALTERNATE ROUTING CODE
4532  */
4533 
4534 struct mbuf *
4535 sctp_generate_invmanparam(int err)
4536 {
4537 	/* Return a MBUF with a invalid mandatory parameter */
4538 	struct mbuf *m;
4539 
4540 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4541 	if (m) {
4542 		struct sctp_paramhdr *ph;
4543 
4544 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4545 		ph = mtod(m, struct sctp_paramhdr *);
4546 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4547 		ph->param_type = htons(err);
4548 	}
4549 	return (m);
4550 }
4551 
#ifdef SCTP_MBCNT_LOGGING
/*
 * Release the output-queue accounting held by chunk tp1: decrement the
 * association's queued-chunk count and total output queue size and, for
 * TCP-model sockets, the socket send-buffer byte count.  chk_cnt is the
 * number of chunks tp1 represents.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (sctp_logging_level & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero rather than letting the counter go negative */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* only TCP-model sockets mirror queued bytes in so_snd */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}

#endif
4586 
/*
 * Release a PR-SCTP chunk whose lifetime expired: mark tp1 (and, for a
 * fragmented message, every following fragment up to the LAST_FRAG) as
 * SCTP_FORWARD_TSN_SKIP, free the data while notifying the ULP of the
 * failure, and move affected chunks from the send queue to the sent
 * queue.  Returns the total book_size released.  If the fragments span
 * both queues, recurses once onto the send queue.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    int reason, struct sctpchunk_listhead *queue, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	int ret_sz = 0;
	int notdone;
	uint8_t foundeom = 0;

	do {
		ret_sz += tp1->book_size;
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if (tp1->data) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			/*
			 * NOTE(review): the notification passes
			 * SCTP_SO_NOT_LOCKED even when so_locked is set --
			 * verify this cannot re-take the socket lock on
			 * platforms that lock it below.
			 */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, SCTP_SO_NOT_LOCKED);
			sctp_m_freem(tp1->data);
			tp1->data = NULL;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			so = SCTP_INP_SO(stcb->sctp_ep);
			if (!so_locked) {
				/* bounce TCB lock around the socket lock */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					/*
					 * assoc was freed while we were
					 * unlocked
					 */
					SCTP_SOCKET_UNLOCK(so, 1);
					return (ret_sz);
				}
			}
#endif
			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
		if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
			stcb->asoc.sent_queue_cnt_removeable--;
		}
		if (queue == &stcb->asoc.send_queue) {
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.sent_queue_cnt++;
		}
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done   */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
		/*
		 * recurse through the send_queue too, starting at the
		 * beginning.
		 */
		if (tp1) {
			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
			    &stcb->asoc.send_queue, so_locked);
		} else {
			SCTP_PRINTF("hmm, nothing on the send queue and no EOM?\n");
		}
	}
	return (ret_sz);
}
4683 
4684 /*
4685  * checks to see if the given address, sa, is one that is currently known by
4686  * the kernel note: can't distinguish the same address on multiple interfaces
4687  * and doesn't handle multiple addresses with different zone/scope id's note:
4688  * ifa_ifwithaddr() compares the entire sockaddr struct
4689  */
4690 struct sctp_ifa *
4691 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4692     int holds_lock)
4693 {
4694 	struct sctp_laddr *laddr;
4695 
4696 	if (holds_lock == 0) {
4697 		SCTP_INP_RLOCK(inp);
4698 	}
4699 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4700 		if (laddr->ifa == NULL)
4701 			continue;
4702 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4703 			continue;
4704 		if (addr->sa_family == AF_INET) {
4705 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4706 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4707 				/* found him. */
4708 				if (holds_lock == 0) {
4709 					SCTP_INP_RUNLOCK(inp);
4710 				}
4711 				return (laddr->ifa);
4712 				break;
4713 			}
4714 		} else if (addr->sa_family == AF_INET6) {
4715 			if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr,
4716 			    &laddr->ifa->address.sin6.sin6_addr)) {
4717 				/* found him. */
4718 				if (holds_lock == 0) {
4719 					SCTP_INP_RUNLOCK(inp);
4720 				}
4721 				return (laddr->ifa);
4722 				break;
4723 			}
4724 		}
4725 	}
4726 	if (holds_lock == 0) {
4727 		SCTP_INP_RUNLOCK(inp);
4728 	}
4729 	return (NULL);
4730 }
4731 
4732 uint32_t
4733 sctp_get_ifa_hash_val(struct sockaddr *addr)
4734 {
4735 	if (addr->sa_family == AF_INET) {
4736 		struct sockaddr_in *sin;
4737 
4738 		sin = (struct sockaddr_in *)addr;
4739 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4740 	} else if (addr->sa_family == AF_INET6) {
4741 		struct sockaddr_in6 *sin6;
4742 		uint32_t hash_of_addr;
4743 
4744 		sin6 = (struct sockaddr_in6 *)addr;
4745 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4746 		    sin6->sin6_addr.s6_addr32[1] +
4747 		    sin6->sin6_addr.s6_addr32[2] +
4748 		    sin6->sin6_addr.s6_addr32[3]);
4749 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4750 		return (hash_of_addr);
4751 	}
4752 	return (0);
4753 }
4754 
4755 struct sctp_ifa *
4756 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4757 {
4758 	struct sctp_ifa *sctp_ifap;
4759 	struct sctp_vrf *vrf;
4760 	struct sctp_ifalist *hash_head;
4761 	uint32_t hash_of_addr;
4762 
4763 	if (holds_lock == 0)
4764 		SCTP_IPI_ADDR_RLOCK();
4765 
4766 	vrf = sctp_find_vrf(vrf_id);
4767 	if (vrf == NULL) {
4768 		if (holds_lock == 0)
4769 			SCTP_IPI_ADDR_RUNLOCK();
4770 		return (NULL);
4771 	}
4772 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4773 
4774 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4775 	if (hash_head == NULL) {
4776 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4777 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4778 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4779 		sctp_print_address(addr);
4780 		SCTP_PRINTF("No such bucket for address\n");
4781 		if (holds_lock == 0)
4782 			SCTP_IPI_ADDR_RUNLOCK();
4783 
4784 		return (NULL);
4785 	}
4786 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4787 		if (sctp_ifap == NULL) {
4788 			panic("Huh LIST_FOREACH corrupt");
4789 		}
4790 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4791 			continue;
4792 		if (addr->sa_family == AF_INET) {
4793 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4794 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4795 				/* found him. */
4796 				if (holds_lock == 0)
4797 					SCTP_IPI_ADDR_RUNLOCK();
4798 				return (sctp_ifap);
4799 				break;
4800 			}
4801 		} else if (addr->sa_family == AF_INET6) {
4802 			if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr,
4803 			    &sctp_ifap->address.sin6.sin6_addr)) {
4804 				/* found him. */
4805 				if (holds_lock == 0)
4806 					SCTP_IPI_ADDR_RUNLOCK();
4807 				return (sctp_ifap);
4808 				break;
4809 			}
4810 		}
4811 	}
4812 	if (holds_lock == 0)
4813 		SCTP_IPI_ADDR_RUNLOCK();
4814 	return (NULL);
4815 }
4816 
/*
 * User pulled some data from the socket: decide whether enough receive
 * window opened up (at least rwnd_req bytes beyond the last reported
 * rwnd) to justify sending an immediate window-update SACK.
 * *freed_so_far is consumed into the per-tcb running total and reset.
 * hold_rlock is set when the caller holds the inp READ lock, which must
 * be dropped around the SACK send and re-acquired before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* hold a reference so the assoc cannot vanish under us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		if (hold_rlock) {
			/* can't send a SACK while holding the read lock */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb);
		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* re-take the read lock if we dropped it above */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
4897 
4898 int
4899 sctp_sorecvmsg(struct socket *so,
4900     struct uio *uio,
4901     struct mbuf **mp,
4902     struct sockaddr *from,
4903     int fromlen,
4904     int *msg_flags,
4905     struct sctp_sndrcvinfo *sinfo,
4906     int filling_sinfo)
4907 {
4908 	/*
4909 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
4910 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
4911 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
4912 	 * On the way out we may send out any combination of:
4913 	 * MSG_NOTIFICATION MSG_EOR
4914 	 *
4915 	 */
4916 	struct sctp_inpcb *inp = NULL;
4917 	int my_len = 0;
4918 	int cp_len = 0, error = 0;
4919 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
4920 	struct mbuf *m = NULL, *embuf = NULL;
4921 	struct sctp_tcb *stcb = NULL;
4922 	int wakeup_read_socket = 0;
4923 	int freecnt_applied = 0;
4924 	int out_flags = 0, in_flags = 0;
4925 	int block_allowed = 1;
4926 	uint32_t freed_so_far = 0;
4927 	int copied_so_far = 0;
4928 	int in_eeor_mode = 0;
4929 	int no_rcv_needed = 0;
4930 	uint32_t rwnd_req = 0;
4931 	int hold_sblock = 0;
4932 	int hold_rlock = 0;
4933 	int slen = 0;
4934 	uint32_t held_length = 0;
4935 	int sockbuf_lock = 0;
4936 
4937 	if (uio == NULL) {
4938 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
4939 		return (EINVAL);
4940 	}
4941 	if (from && fromlen <= 0) {
4942 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
4943 		return (EINVAL);
4944 	}
4945 	if (msg_flags) {
4946 		in_flags = *msg_flags;
4947 		if (in_flags & MSG_PEEK)
4948 			SCTP_STAT_INCR(sctps_read_peeks);
4949 	} else {
4950 		in_flags = 0;
4951 	}
4952 	slen = uio->uio_resid;
4953 
4954 	/* Pull in and set up our int flags */
4955 	if (in_flags & MSG_OOB) {
4956 		/* Out of band's NOT supported */
4957 		return (EOPNOTSUPP);
4958 	}
4959 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
4960 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
4961 		return (EINVAL);
4962 	}
4963 	if ((in_flags & (MSG_DONTWAIT
4964 	    | MSG_NBIO
4965 	    )) ||
4966 	    SCTP_SO_IS_NBIO(so)) {
4967 		block_allowed = 0;
4968 	}
4969 	/* setup the endpoint */
4970 	inp = (struct sctp_inpcb *)so->so_pcb;
4971 	if (inp == NULL) {
4972 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
4973 		return (EFAULT);
4974 	}
4975 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
4976 	/* Must be at least a MTU's worth */
4977 	if (rwnd_req < SCTP_MIN_RWND)
4978 		rwnd_req = SCTP_MIN_RWND;
4979 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
4980 	if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
4981 		sctp_misc_ints(SCTP_SORECV_ENTER,
4982 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
4983 	}
4984 	if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
4985 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
4986 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
4987 	}
4988 	error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
4989 	sockbuf_lock = 1;
4990 	if (error) {
4991 		goto release_unlocked;
4992 	}
4993 restart:
4994 
4995 
4996 restart_nosblocks:
4997 	if (hold_sblock == 0) {
4998 		SOCKBUF_LOCK(&so->so_rcv);
4999 		hold_sblock = 1;
5000 	}
5001 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5002 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5003 		goto out;
5004 	}
5005 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5006 		if (so->so_error) {
5007 			error = so->so_error;
5008 			if ((in_flags & MSG_PEEK) == 0)
5009 				so->so_error = 0;
5010 		} else {
5011 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5012 			/* indicate EOF */
5013 			error = 0;
5014 		}
5015 		goto out;
5016 	}
5017 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5018 		/* we need to wait for data */
5019 		if ((so->so_rcv.sb_cc == 0) &&
5020 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5021 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5022 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5023 				/*
5024 				 * For active open side clear flags for
5025 				 * re-use passive open is blocked by
5026 				 * connect.
5027 				 */
5028 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5029 					/*
5030 					 * You were aborted, passive side
5031 					 * always hits here
5032 					 */
5033 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5034 					error = ECONNRESET;
5035 					/*
5036 					 * You get this once if you are
5037 					 * active open side
5038 					 */
5039 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5040 						/*
5041 						 * Remove flag if on the
5042 						 * active open side
5043 						 */
5044 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5045 					}
5046 				}
5047 				so->so_state &= ~(SS_ISCONNECTING |
5048 				    SS_ISDISCONNECTING |
5049 				    SS_ISCONFIRMING |
5050 				    SS_ISCONNECTED);
5051 				if (error == 0) {
5052 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5053 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5054 						error = ENOTCONN;
5055 					} else {
5056 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5057 					}
5058 				}
5059 				goto out;
5060 			}
5061 		}
5062 		error = sbwait(&so->so_rcv);
5063 		if (error) {
5064 			goto out;
5065 		}
5066 		held_length = 0;
5067 		goto restart_nosblocks;
5068 	} else if (so->so_rcv.sb_cc == 0) {
5069 		if (so->so_error) {
5070 			error = so->so_error;
5071 			if ((in_flags & MSG_PEEK) == 0)
5072 				so->so_error = 0;
5073 		} else {
5074 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5075 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5076 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5077 					/*
5078 					 * For active open side clear flags
5079 					 * for re-use passive open is
5080 					 * blocked by connect.
5081 					 */
5082 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5083 						/*
5084 						 * You were aborted, passive
5085 						 * side always hits here
5086 						 */
5087 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5088 						error = ECONNRESET;
5089 						/*
5090 						 * You get this once if you
5091 						 * are active open side
5092 						 */
5093 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5094 							/*
5095 							 * Remove flag if on
5096 							 * the active open
5097 							 * side
5098 							 */
5099 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5100 						}
5101 					}
5102 					so->so_state &= ~(SS_ISCONNECTING |
5103 					    SS_ISDISCONNECTING |
5104 					    SS_ISCONFIRMING |
5105 					    SS_ISCONNECTED);
5106 					if (error == 0) {
5107 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5108 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5109 							error = ENOTCONN;
5110 						} else {
5111 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5112 						}
5113 					}
5114 					goto out;
5115 				}
5116 			}
5117 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5118 			error = EWOULDBLOCK;
5119 		}
5120 		goto out;
5121 	}
5122 	if (hold_sblock == 1) {
5123 		SOCKBUF_UNLOCK(&so->so_rcv);
5124 		hold_sblock = 0;
5125 	}
5126 	/* we possibly have data we can read */
5127 	/* sa_ignore FREED_MEMORY */
5128 	control = TAILQ_FIRST(&inp->read_queue);
5129 	if (control == NULL) {
5130 		/*
5131 		 * This could be happening since the appender did the
5132 		 * increment but as not yet did the tailq insert onto the
5133 		 * read_queue
5134 		 */
5135 		if (hold_rlock == 0) {
5136 			SCTP_INP_READ_LOCK(inp);
5137 			hold_rlock = 1;
5138 		}
5139 		control = TAILQ_FIRST(&inp->read_queue);
5140 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5141 #ifdef INVARIANTS
5142 			panic("Huh, its non zero and nothing on control?");
5143 #endif
5144 			so->so_rcv.sb_cc = 0;
5145 		}
5146 		SCTP_INP_READ_UNLOCK(inp);
5147 		hold_rlock = 0;
5148 		goto restart;
5149 	}
5150 	if ((control->length == 0) &&
5151 	    (control->do_not_ref_stcb)) {
5152 		/*
5153 		 * Clean up code for freeing assoc that left behind a
5154 		 * pdapi.. maybe a peer in EEOR that just closed after
5155 		 * sending and never indicated a EOR.
5156 		 */
5157 		if (hold_rlock == 0) {
5158 			hold_rlock = 1;
5159 			SCTP_INP_READ_LOCK(inp);
5160 		}
5161 		control->held_length = 0;
5162 		if (control->data) {
5163 			/* Hmm there is data here .. fix */
5164 			struct mbuf *m_tmp;
5165 			int cnt = 0;
5166 
5167 			m_tmp = control->data;
5168 			while (m_tmp) {
5169 				cnt += SCTP_BUF_LEN(m_tmp);
5170 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5171 					control->tail_mbuf = m_tmp;
5172 					control->end_added = 1;
5173 				}
5174 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5175 			}
5176 			control->length = cnt;
5177 		} else {
5178 			/* remove it */
5179 			TAILQ_REMOVE(&inp->read_queue, control, next);
5180 			/* Add back any hiddend data */
5181 			sctp_free_remote_addr(control->whoFrom);
5182 			sctp_free_a_readq(stcb, control);
5183 		}
5184 		if (hold_rlock) {
5185 			hold_rlock = 0;
5186 			SCTP_INP_READ_UNLOCK(inp);
5187 		}
5188 		goto restart;
5189 	}
5190 	if (control->length == 0) {
5191 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5192 		    (filling_sinfo)) {
5193 			/* find a more suitable one then this */
5194 			ctl = TAILQ_NEXT(control, next);
5195 			while (ctl) {
5196 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5197 				    (ctl->some_taken ||
5198 				    (ctl->spec_flags & M_NOTIFICATION) ||
5199 				    ((ctl->do_not_ref_stcb == 0) &&
5200 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5201 				    ) {
5202 					/*-
5203 					 * If we have a different TCB next, and there is data
5204 					 * present. If we have already taken some (pdapi), OR we can
5205 					 * ref the tcb and no delivery as started on this stream, we
5206 					 * take it. Note we allow a notification on a different
5207 					 * assoc to be delivered..
5208 					 */
5209 					control = ctl;
5210 					goto found_one;
5211 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5212 					    (ctl->length) &&
5213 					    ((ctl->some_taken) ||
5214 					    ((ctl->do_not_ref_stcb == 0) &&
5215 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5216 					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5217 				    ) {
5218 					/*-
5219 					 * If we have the same tcb, and there is data present, and we
5220 					 * have the strm interleave feature present. Then if we have
5221 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5222 					 * not started a delivery for this stream, we can take it.
5223 					 * Note we do NOT allow a notificaiton on the same assoc to
5224 					 * be delivered.
5225 					 */
5226 					control = ctl;
5227 					goto found_one;
5228 				}
5229 				ctl = TAILQ_NEXT(ctl, next);
5230 			}
5231 		}
5232 		/*
5233 		 * if we reach here, not suitable replacement is available
5234 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5235 		 * into the our held count, and its time to sleep again.
5236 		 */
5237 		held_length = so->so_rcv.sb_cc;
5238 		control->held_length = so->so_rcv.sb_cc;
5239 		goto restart;
5240 	}
5241 	/* Clear the held length since there is something to read */
5242 	control->held_length = 0;
5243 	if (hold_rlock) {
5244 		SCTP_INP_READ_UNLOCK(inp);
5245 		hold_rlock = 0;
5246 	}
5247 found_one:
5248 	/*
5249 	 * If we reach here, control has a some data for us to read off.
5250 	 * Note that stcb COULD be NULL.
5251 	 */
5252 	control->some_taken = 1;
5253 	if (hold_sblock) {
5254 		SOCKBUF_UNLOCK(&so->so_rcv);
5255 		hold_sblock = 0;
5256 	}
5257 	stcb = control->stcb;
5258 	if (stcb) {
5259 		if ((control->do_not_ref_stcb == 0) &&
5260 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5261 			if (freecnt_applied == 0)
5262 				stcb = NULL;
5263 		} else if (control->do_not_ref_stcb == 0) {
5264 			/* you can't free it on me please */
5265 			/*
5266 			 * The lock on the socket buffer protects us so the
5267 			 * free code will stop. But since we used the
5268 			 * socketbuf lock and the sender uses the tcb_lock
5269 			 * to increment, we need to use the atomic add to
5270 			 * the refcnt
5271 			 */
5272 			if (freecnt_applied) {
5273 #ifdef INVARIANTS
5274 				panic("refcnt already incremented");
5275 #else
5276 				printf("refcnt already incremented?\n");
5277 #endif
5278 			} else {
5279 				atomic_add_int(&stcb->asoc.refcnt, 1);
5280 				freecnt_applied = 1;
5281 			}
5282 			/*
5283 			 * Setup to remember how much we have not yet told
5284 			 * the peer our rwnd has opened up. Note we grab the
5285 			 * value from the tcb from last time. Note too that
5286 			 * sack sending clears this when a sack is sent,
5287 			 * which is fine. Once we hit the rwnd_req, we then
5288 			 * will go to the sctp_user_rcvd() that will not
5289 			 * lock until it KNOWs it MUST send a WUP-SACK.
5290 			 */
5291 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5292 			stcb->freed_by_sorcv_sincelast = 0;
5293 		}
5294 	}
5295 	if (stcb &&
5296 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5297 	    control->do_not_ref_stcb == 0) {
5298 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5299 	}
5300 	/* First lets get off the sinfo and sockaddr info */
5301 	if ((sinfo) && filling_sinfo) {
5302 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5303 		nxt = TAILQ_NEXT(control, next);
5304 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5305 			struct sctp_extrcvinfo *s_extra;
5306 
5307 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5308 			if ((nxt) &&
5309 			    (nxt->length)) {
5310 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5311 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5312 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5313 				}
5314 				if (nxt->spec_flags & M_NOTIFICATION) {
5315 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5316 				}
5317 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5318 				s_extra->sreinfo_next_length = nxt->length;
5319 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5320 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5321 				if (nxt->tail_mbuf != NULL) {
5322 					if (nxt->end_added) {
5323 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5324 					}
5325 				}
5326 			} else {
5327 				/*
5328 				 * we explicitly 0 this, since the memcpy
5329 				 * got some other things beyond the older
5330 				 * sinfo_ that is on the control's structure
5331 				 * :-D
5332 				 */
5333 				nxt = NULL;
5334 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5335 				s_extra->sreinfo_next_aid = 0;
5336 				s_extra->sreinfo_next_length = 0;
5337 				s_extra->sreinfo_next_ppid = 0;
5338 				s_extra->sreinfo_next_stream = 0;
5339 			}
5340 		}
5341 		/*
5342 		 * update off the real current cum-ack, if we have an stcb.
5343 		 */
5344 		if ((control->do_not_ref_stcb == 0) && stcb)
5345 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5346 		/*
5347 		 * mask off the high bits, we keep the actual chunk bits in
5348 		 * there.
5349 		 */
5350 		sinfo->sinfo_flags &= 0x00ff;
5351 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5352 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5353 		}
5354 	}
5355 #ifdef SCTP_ASOCLOG_OF_TSNS
5356 	{
5357 		int index, newindex;
5358 		struct sctp_pcbtsn_rlog *entry;
5359 
5360 		do {
5361 			index = inp->readlog_index;
5362 			newindex = index + 1;
5363 			if (newindex >= SCTP_READ_LOG_SIZE) {
5364 				newindex = 0;
5365 			}
5366 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5367 		entry = &inp->readlog[index];
5368 		entry->vtag = control->sinfo_assoc_id;
5369 		entry->strm = control->sinfo_stream;
5370 		entry->seq = control->sinfo_ssn;
5371 		entry->sz = control->length;
5372 		entry->flgs = control->sinfo_flags;
5373 	}
5374 #endif
5375 	if (fromlen && from) {
5376 		struct sockaddr *to;
5377 
5378 #ifdef INET
5379 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5380 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5381 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5382 #else
5383 		/* No AF_INET use AF_INET6 */
5384 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5385 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5386 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5387 #endif
5388 
5389 		to = from;
5390 #if defined(INET) && defined(INET6)
5391 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
5392 		    (to->sa_family == AF_INET) &&
5393 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5394 			struct sockaddr_in *sin;
5395 			struct sockaddr_in6 sin6;
5396 
5397 			sin = (struct sockaddr_in *)to;
5398 			bzero(&sin6, sizeof(sin6));
5399 			sin6.sin6_family = AF_INET6;
5400 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5401 			sin6.sin6_addr.s6_addr16[2] = 0xffff;
5402 			bcopy(&sin->sin_addr,
5403 			    &sin6.sin6_addr.s6_addr16[3],
5404 			    sizeof(sin6.sin6_addr.s6_addr16[3]));
5405 			sin6.sin6_port = sin->sin_port;
5406 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5407 		}
5408 #endif
5409 #if defined(INET6)
5410 		{
5411 			struct sockaddr_in6 lsa6, *to6;
5412 
5413 			to6 = (struct sockaddr_in6 *)to;
5414 			sctp_recover_scope_mac(to6, (&lsa6));
5415 		}
5416 #endif
5417 	}
5418 	/* now copy out what data we can */
5419 	if (mp == NULL) {
5420 		/* copy out each mbuf in the chain up to length */
5421 get_more_data:
5422 		m = control->data;
5423 		while (m) {
5424 			/* Move out all we can */
5425 			cp_len = (int)uio->uio_resid;
5426 			my_len = (int)SCTP_BUF_LEN(m);
5427 			if (cp_len > my_len) {
5428 				/* not enough in this buf */
5429 				cp_len = my_len;
5430 			}
5431 			if (hold_rlock) {
5432 				SCTP_INP_READ_UNLOCK(inp);
5433 				hold_rlock = 0;
5434 			}
5435 			if (cp_len > 0)
5436 				error = uiomove(mtod(m, char *), cp_len, uio);
5437 			/* re-read */
5438 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5439 				goto release;
5440 			}
5441 			if ((control->do_not_ref_stcb == 0) && stcb &&
5442 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5443 				no_rcv_needed = 1;
5444 			}
5445 			if (error) {
5446 				/* error we are out of here */
5447 				goto release;
5448 			}
5449 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5450 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5451 			    ((control->end_added == 0) ||
5452 			    (control->end_added &&
5453 			    (TAILQ_NEXT(control, next) == NULL)))
5454 			    ) {
5455 				SCTP_INP_READ_LOCK(inp);
5456 				hold_rlock = 1;
5457 			}
5458 			if (cp_len == SCTP_BUF_LEN(m)) {
5459 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5460 				    (control->end_added)) {
5461 					out_flags |= MSG_EOR;
5462 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5463 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5464 				}
5465 				if (control->spec_flags & M_NOTIFICATION) {
5466 					out_flags |= MSG_NOTIFICATION;
5467 				}
5468 				/* we ate up the mbuf */
5469 				if (in_flags & MSG_PEEK) {
5470 					/* just looking */
5471 					m = SCTP_BUF_NEXT(m);
5472 					copied_so_far += cp_len;
5473 				} else {
5474 					/* dispose of the mbuf */
5475 					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5476 						sctp_sblog(&so->so_rcv,
5477 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5478 					}
5479 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5480 					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5481 						sctp_sblog(&so->so_rcv,
5482 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5483 					}
5484 					embuf = m;
5485 					copied_so_far += cp_len;
5486 					freed_so_far += cp_len;
5487 					freed_so_far += MSIZE;
5488 					atomic_subtract_int(&control->length, cp_len);
5489 					control->data = sctp_m_free(m);
5490 					m = control->data;
5491 					/*
5492 					 * been through it all, must hold sb
5493 					 * lock ok to null tail
5494 					 */
5495 					if (control->data == NULL) {
5496 #ifdef INVARIANTS
5497 						if ((control->end_added == 0) ||
5498 						    (TAILQ_NEXT(control, next) == NULL)) {
5499 							/*
5500 							 * If the end is not
5501 							 * added, OR the
5502 							 * next is NOT null
5503 							 * we MUST have the
5504 							 * lock.
5505 							 */
5506 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5507 								panic("Hmm we don't own the lock?");
5508 							}
5509 						}
5510 #endif
5511 						control->tail_mbuf = NULL;
5512 #ifdef INVARIANTS
5513 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5514 							panic("end_added, nothing left and no MSG_EOR");
5515 						}
5516 #endif
5517 					}
5518 				}
5519 			} else {
5520 				/* Do we need to trim the mbuf? */
5521 				if (control->spec_flags & M_NOTIFICATION) {
5522 					out_flags |= MSG_NOTIFICATION;
5523 				}
5524 				if ((in_flags & MSG_PEEK) == 0) {
5525 					SCTP_BUF_RESV_UF(m, cp_len);
5526 					SCTP_BUF_LEN(m) -= cp_len;
5527 					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5528 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5529 					}
5530 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5531 					if ((control->do_not_ref_stcb == 0) &&
5532 					    stcb) {
5533 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5534 					}
5535 					copied_so_far += cp_len;
5536 					embuf = m;
5537 					freed_so_far += cp_len;
5538 					freed_so_far += MSIZE;
5539 					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5540 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5541 						    SCTP_LOG_SBRESULT, 0);
5542 					}
5543 					atomic_subtract_int(&control->length, cp_len);
5544 				} else {
5545 					copied_so_far += cp_len;
5546 				}
5547 			}
5548 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5549 				break;
5550 			}
5551 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5552 			    (control->do_not_ref_stcb == 0) &&
5553 			    (freed_so_far >= rwnd_req)) {
5554 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5555 			}
5556 		}		/* end while(m) */
5557 		/*
5558 		 * At this point we have looked at it all and we either have
5559 		 * a MSG_EOR/or read all the user wants... <OR>
5560 		 * control->length == 0.
5561 		 */
5562 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5563 			/* we are done with this control */
5564 			if (control->length == 0) {
5565 				if (control->data) {
5566 #ifdef INVARIANTS
5567 					panic("control->data not null at read eor?");
5568 #else
5569 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5570 					sctp_m_freem(control->data);
5571 					control->data = NULL;
5572 #endif
5573 				}
5574 		done_with_control:
5575 				if (TAILQ_NEXT(control, next) == NULL) {
5576 					/*
5577 					 * If we don't have a next we need a
5578 					 * lock, if there is a next interrupt
5579 					 * is filling ahead of us and we
5580 					 * don't need a lock to remove this
5581 					 * guy (which is the head of the
5582 					 * queue).
5583 					 */
5584 					if (hold_rlock == 0) {
5585 						SCTP_INP_READ_LOCK(inp);
5586 						hold_rlock = 1;
5587 					}
5588 				}
5589 				TAILQ_REMOVE(&inp->read_queue, control, next);
5590 				/* Add back any hiddend data */
5591 				if (control->held_length) {
5592 					held_length = 0;
5593 					control->held_length = 0;
5594 					wakeup_read_socket = 1;
5595 				}
5596 				if (control->aux_data) {
5597 					sctp_m_free(control->aux_data);
5598 					control->aux_data = NULL;
5599 				}
5600 				no_rcv_needed = control->do_not_ref_stcb;
5601 				sctp_free_remote_addr(control->whoFrom);
5602 				control->data = NULL;
5603 				sctp_free_a_readq(stcb, control);
5604 				control = NULL;
5605 				if ((freed_so_far >= rwnd_req) &&
5606 				    (no_rcv_needed == 0))
5607 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5608 
5609 			} else {
5610 				/*
5611 				 * The user did not read all of this
5612 				 * message, turn off the returned MSG_EOR
5613 				 * since we are leaving more behind on the
5614 				 * control to read.
5615 				 */
5616 #ifdef INVARIANTS
5617 				if (control->end_added &&
5618 				    (control->data == NULL) &&
5619 				    (control->tail_mbuf == NULL)) {
5620 					panic("Gak, control->length is corrupt?");
5621 				}
5622 #endif
5623 				no_rcv_needed = control->do_not_ref_stcb;
5624 				out_flags &= ~MSG_EOR;
5625 			}
5626 		}
5627 		if (out_flags & MSG_EOR) {
5628 			goto release;
5629 		}
5630 		if ((uio->uio_resid == 0) ||
5631 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5632 		    ) {
5633 			goto release;
5634 		}
5635 		/*
5636 		 * If I hit here the receiver wants more and this message is
5637 		 * NOT done (pd-api). So two questions. Can we block? if not
5638 		 * we are done. Did the user NOT set MSG_WAITALL?
5639 		 */
5640 		if (block_allowed == 0) {
5641 			goto release;
5642 		}
5643 		/*
5644 		 * We need to wait for more data a few things: - We don't
5645 		 * sbunlock() so we don't get someone else reading. - We
5646 		 * must be sure to account for the case where what is added
5647 		 * is NOT to our control when we wakeup.
5648 		 */
5649 
5650 		/*
5651 		 * Do we need to tell the transport a rwnd update might be
5652 		 * needed before we go to sleep?
5653 		 */
5654 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5655 		    ((freed_so_far >= rwnd_req) &&
5656 		    (control->do_not_ref_stcb == 0) &&
5657 		    (no_rcv_needed == 0))) {
5658 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5659 		}
5660 wait_some_more:
5661 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5662 			goto release;
5663 		}
5664 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5665 			goto release;
5666 
5667 		if (hold_rlock == 1) {
5668 			SCTP_INP_READ_UNLOCK(inp);
5669 			hold_rlock = 0;
5670 		}
5671 		if (hold_sblock == 0) {
5672 			SOCKBUF_LOCK(&so->so_rcv);
5673 			hold_sblock = 1;
5674 		}
5675 		if ((copied_so_far) && (control->length == 0) &&
5676 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
5677 		    ) {
5678 			goto release;
5679 		}
5680 		if (so->so_rcv.sb_cc <= control->held_length) {
5681 			error = sbwait(&so->so_rcv);
5682 			if (error) {
5683 				goto release;
5684 			}
5685 			control->held_length = 0;
5686 		}
5687 		if (hold_sblock) {
5688 			SOCKBUF_UNLOCK(&so->so_rcv);
5689 			hold_sblock = 0;
5690 		}
5691 		if (control->length == 0) {
5692 			/* still nothing here */
5693 			if (control->end_added == 1) {
5694 				/* he aborted, or is done i.e.did a shutdown */
5695 				out_flags |= MSG_EOR;
5696 				if (control->pdapi_aborted) {
5697 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5698 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5699 
5700 					out_flags |= MSG_TRUNC;
5701 				} else {
5702 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5703 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5704 				}
5705 				goto done_with_control;
5706 			}
5707 			if (so->so_rcv.sb_cc > held_length) {
5708 				control->held_length = so->so_rcv.sb_cc;
5709 				held_length = 0;
5710 			}
5711 			goto wait_some_more;
5712 		} else if (control->data == NULL) {
5713 			/*
5714 			 * we must re-sync since data is probably being
5715 			 * added
5716 			 */
5717 			SCTP_INP_READ_LOCK(inp);
5718 			if ((control->length > 0) && (control->data == NULL)) {
5719 				/*
5720 				 * big trouble.. we have the lock and its
5721 				 * corrupt?
5722 				 */
5723 				panic("Impossible data==NULL length !=0");
5724 			}
5725 			SCTP_INP_READ_UNLOCK(inp);
5726 			/* We will fall around to get more data */
5727 		}
5728 		goto get_more_data;
5729 	} else {
5730 		/*-
5731 		 * Give caller back the mbuf chain,
5732 		 * store in uio_resid the length
5733 		 */
5734 		wakeup_read_socket = 0;
5735 		if ((control->end_added == 0) ||
5736 		    (TAILQ_NEXT(control, next) == NULL)) {
5737 			/* Need to get rlock */
5738 			if (hold_rlock == 0) {
5739 				SCTP_INP_READ_LOCK(inp);
5740 				hold_rlock = 1;
5741 			}
5742 		}
5743 		if (control->end_added) {
5744 			out_flags |= MSG_EOR;
5745 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5746 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5747 		}
5748 		if (control->spec_flags & M_NOTIFICATION) {
5749 			out_flags |= MSG_NOTIFICATION;
5750 		}
5751 		uio->uio_resid = control->length;
5752 		*mp = control->data;
5753 		m = control->data;
5754 		while (m) {
5755 			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5756 				sctp_sblog(&so->so_rcv,
5757 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5758 			}
5759 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5760 			freed_so_far += SCTP_BUF_LEN(m);
5761 			freed_so_far += MSIZE;
5762 			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5763 				sctp_sblog(&so->so_rcv,
5764 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5765 			}
5766 			m = SCTP_BUF_NEXT(m);
5767 		}
5768 		control->data = control->tail_mbuf = NULL;
5769 		control->length = 0;
5770 		if (out_flags & MSG_EOR) {
5771 			/* Done with this control */
5772 			goto done_with_control;
5773 		}
5774 	}
5775 release:
5776 	if (hold_rlock == 1) {
5777 		SCTP_INP_READ_UNLOCK(inp);
5778 		hold_rlock = 0;
5779 	}
5780 	if (hold_sblock == 1) {
5781 		SOCKBUF_UNLOCK(&so->so_rcv);
5782 		hold_sblock = 0;
5783 	}
5784 	sbunlock(&so->so_rcv);
5785 	sockbuf_lock = 0;
5786 
5787 release_unlocked:
5788 	if (hold_sblock) {
5789 		SOCKBUF_UNLOCK(&so->so_rcv);
5790 		hold_sblock = 0;
5791 	}
5792 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5793 		if ((freed_so_far >= rwnd_req) &&
5794 		    (control && (control->do_not_ref_stcb == 0)) &&
5795 		    (no_rcv_needed == 0))
5796 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5797 	}
5798 	if (msg_flags)
5799 		*msg_flags |= out_flags;
5800 out:
5801 	if (((out_flags & MSG_EOR) == 0) &&
5802 	    ((in_flags & MSG_PEEK) == 0) &&
5803 	    (sinfo) &&
5804 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
5805 		struct sctp_extrcvinfo *s_extra;
5806 
5807 		s_extra = (struct sctp_extrcvinfo *)sinfo;
5808 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5809 	}
5810 	if (hold_rlock == 1) {
5811 		SCTP_INP_READ_UNLOCK(inp);
5812 		hold_rlock = 0;
5813 	}
5814 	if (hold_sblock) {
5815 		SOCKBUF_UNLOCK(&so->so_rcv);
5816 		hold_sblock = 0;
5817 	}
5818 	if (sockbuf_lock) {
5819 		sbunlock(&so->so_rcv);
5820 	}
5821 	if (freecnt_applied) {
5822 		/*
5823 		 * The lock on the socket buffer protects us so the free
5824 		 * code will stop. But since we used the socketbuf lock and
5825 		 * the sender uses the tcb_lock to increment, we need to use
5826 		 * the atomic add to the refcnt.
5827 		 */
5828 		if (stcb == NULL) {
5829 			panic("stcb for refcnt has gone NULL?");
5830 		}
5831 		atomic_add_int(&stcb->asoc.refcnt, -1);
5832 		freecnt_applied = 0;
5833 		/* Save the value back for next time */
5834 		stcb->freed_by_sorcv_sincelast = freed_so_far;
5835 	}
5836 	if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
5837 		if (stcb) {
5838 			sctp_misc_ints(SCTP_SORECV_DONE,
5839 			    freed_so_far,
5840 			    ((uio) ? (slen - uio->uio_resid) : slen),
5841 			    stcb->asoc.my_rwnd,
5842 			    so->so_rcv.sb_cc);
5843 		} else {
5844 			sctp_misc_ints(SCTP_SORECV_DONE,
5845 			    freed_so_far,
5846 			    ((uio) ? (slen - uio->uio_resid) : slen),
5847 			    0,
5848 			    so->so_rcv.sb_cc);
5849 		}
5850 	}
5851 	if (wakeup_read_socket) {
5852 		sctp_sorwakeup(inp, so);
5853 	}
5854 	return (error);
5855 }
5856 
5857 
5858 #ifdef SCTP_MBUF_LOGGING
5859 struct mbuf *
5860 sctp_m_free(struct mbuf *m)
5861 {
5862 	if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
5863 		if (SCTP_BUF_IS_EXTENDED(m)) {
5864 			sctp_log_mb(m, SCTP_MBUF_IFREE);
5865 		}
5866 	}
5867 	return (m_free(m));
5868 }
5869 
5870 void
5871 sctp_m_freem(struct mbuf *mb)
5872 {
5873 	while (mb != NULL)
5874 		mb = sctp_m_free(mb);
5875 }
5876 
5877 #endif
5878 
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 *
	 * Returns 0 on success, EADDRNOTAVAIL if the address is not a
	 * local interface address in the given VRF, or ENOMEM if a work
	 * queue entry cannot be allocated.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	/* Map the sockaddr onto our per-VRF interface address entry. */
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa while the entry sits on the queue. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_IPI_ITERATOR_WQ_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&sctppcbinfo.addr_wq, wi, sctp_nxt_addr);
	/* Kick the address work-queue timer so the entry gets serviced. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
	return (0);
}
5925 
5926 
5927 
5928 
5929 int
5930 sctp_soreceive(struct socket *so,
5931     struct sockaddr **psa,
5932     struct uio *uio,
5933     struct mbuf **mp0,
5934     struct mbuf **controlp,
5935     int *flagsp)
5936 {
5937 	int error, fromlen;
5938 	uint8_t sockbuf[256];
5939 	struct sockaddr *from;
5940 	struct sctp_extrcvinfo sinfo;
5941 	int filling_sinfo = 1;
5942 	struct sctp_inpcb *inp;
5943 
5944 	inp = (struct sctp_inpcb *)so->so_pcb;
5945 	/* pickup the assoc we are reading from */
5946 	if (inp == NULL) {
5947 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5948 		return (EINVAL);
5949 	}
5950 	if ((sctp_is_feature_off(inp,
5951 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
5952 	    (controlp == NULL)) {
5953 		/* user does not want the sndrcv ctl */
5954 		filling_sinfo = 0;
5955 	}
5956 	if (psa) {
5957 		from = (struct sockaddr *)sockbuf;
5958 		fromlen = sizeof(sockbuf);
5959 		from->sa_len = 0;
5960 	} else {
5961 		from = NULL;
5962 		fromlen = 0;
5963 	}
5964 
5965 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
5966 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
5967 	if ((controlp) && (filling_sinfo)) {
5968 		/* copy back the sinfo in a CMSG format */
5969 		if (filling_sinfo)
5970 			*controlp = sctp_build_ctl_nchunk(inp,
5971 			    (struct sctp_sndrcvinfo *)&sinfo);
5972 		else
5973 			*controlp = NULL;
5974 	}
5975 	if (psa) {
5976 		/* copy back the address info */
5977 		if (from && from->sa_len) {
5978 			*psa = sodupsockaddr(from, M_NOWAIT);
5979 		} else {
5980 			*psa = NULL;
5981 		}
5982 	}
5983 	return (error);
5984 }
5985 
5986 
5987 int
5988 sctp_l_soreceive(struct socket *so,
5989     struct sockaddr **name,
5990     struct uio *uio,
5991     char **controlp,
5992     int *controllen,
5993     int *flag)
5994 {
5995 	int error, fromlen;
5996 	uint8_t sockbuf[256];
5997 	struct sockaddr *from;
5998 	struct sctp_extrcvinfo sinfo;
5999 	int filling_sinfo = 1;
6000 	struct sctp_inpcb *inp;
6001 
6002 	inp = (struct sctp_inpcb *)so->so_pcb;
6003 	/* pickup the assoc we are reading from */
6004 	if (inp == NULL) {
6005 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6006 		return (EINVAL);
6007 	}
6008 	if ((sctp_is_feature_off(inp,
6009 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6010 	    (controlp == NULL)) {
6011 		/* user does not want the sndrcv ctl */
6012 		filling_sinfo = 0;
6013 	}
6014 	if (name) {
6015 		from = (struct sockaddr *)sockbuf;
6016 		fromlen = sizeof(sockbuf);
6017 		from->sa_len = 0;
6018 	} else {
6019 		from = NULL;
6020 		fromlen = 0;
6021 	}
6022 
6023 	error = sctp_sorecvmsg(so, uio,
6024 	    (struct mbuf **)NULL,
6025 	    from, fromlen, flag,
6026 	    (struct sctp_sndrcvinfo *)&sinfo,
6027 	    filling_sinfo);
6028 	if ((controlp) && (filling_sinfo)) {
6029 		/*
6030 		 * copy back the sinfo in a CMSG format note that the caller
6031 		 * has reponsibility for freeing the memory.
6032 		 */
6033 		if (filling_sinfo)
6034 			*controlp = sctp_build_ctl_cchunk(inp,
6035 			    controllen,
6036 			    (struct sctp_sndrcvinfo *)&sinfo);
6037 	}
6038 	if (name) {
6039 		/* copy back the address info */
6040 		if (from && from->sa_len) {
6041 			*name = sodupsockaddr(from, M_WAIT);
6042 		} else {
6043 			*name = NULL;
6044 		}
6045 	}
6046 	return (error);
6047 }
6048 
6049 
6050 
6051 
6052 
6053 
6054 
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Walk the packed array of totaddr sockaddrs at addr and add each
	 * AF_INET/AF_INET6 entry as a confirmed remote address of stcb.
	 * Returns the number of addresses added; on allocation failure the
	 * association is freed, *error is set to ENOBUFS and we bail out.
	 *
	 * NOTE(review): an address of any other family leaves incr at its
	 * previous value, so sa would not advance past it correctly.  The
	 * sibling sctp_connectx_helper_find() truncates totaddr at the
	 * first unknown family — confirm callers always validate through
	 * it first.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/* advance to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6095 
6096 struct sctp_tcb *
6097 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6098     int *totaddr, int *num_v4, int *num_v6, int *error,
6099     int limit, int *bad_addr)
6100 {
6101 	struct sockaddr *sa;
6102 	struct sctp_tcb *stcb = NULL;
6103 	size_t incr, at, i;
6104 
6105 	at = incr = 0;
6106 	sa = addr;
6107 	*error = *num_v6 = *num_v4 = 0;
6108 	/* account and validate addresses */
6109 	for (i = 0; i < (size_t)*totaddr; i++) {
6110 		if (sa->sa_family == AF_INET) {
6111 			(*num_v4) += 1;
6112 			incr = sizeof(struct sockaddr_in);
6113 			if (sa->sa_len != incr) {
6114 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6115 				*error = EINVAL;
6116 				*bad_addr = 1;
6117 				return (NULL);
6118 			}
6119 		} else if (sa->sa_family == AF_INET6) {
6120 			struct sockaddr_in6 *sin6;
6121 
6122 			sin6 = (struct sockaddr_in6 *)sa;
6123 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6124 				/* Must be non-mapped for connectx */
6125 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6126 				*error = EINVAL;
6127 				*bad_addr = 1;
6128 				return (NULL);
6129 			}
6130 			(*num_v6) += 1;
6131 			incr = sizeof(struct sockaddr_in6);
6132 			if (sa->sa_len != incr) {
6133 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6134 				*error = EINVAL;
6135 				*bad_addr = 1;
6136 				return (NULL);
6137 			}
6138 		} else {
6139 			*totaddr = i;
6140 			/* we are done */
6141 			break;
6142 		}
6143 		SCTP_INP_INCR_REF(inp);
6144 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6145 		if (stcb != NULL) {
6146 			/* Already have or am bring up an association */
6147 			return (stcb);
6148 		} else {
6149 			SCTP_INP_DECR_REF(inp);
6150 		}
6151 		if ((at + incr) > (size_t)limit) {
6152 			*totaddr = i;
6153 			break;
6154 		}
6155 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6156 	}
6157 	return ((struct sctp_tcb *)NULL);
6158 }
6159 
6160 /*
6161  * sctp_bindx(ADD) for one address.
6162  * assumes all arguments are valid/checked by caller.
6163  */
6164 void
6165 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6166     struct sockaddr *sa, sctp_assoc_t assoc_id,
6167     uint32_t vrf_id, int *error, void *p)
6168 {
6169 	struct sockaddr *addr_touse;
6170 	struct sockaddr_in sin;
6171 
6172 	/* see if we're bound all already! */
6173 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6174 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6175 		*error = EINVAL;
6176 		return;
6177 	}
6178 	addr_touse = sa;
6179 #if defined(INET6)
6180 	if (sa->sa_family == AF_INET6) {
6181 		struct sockaddr_in6 *sin6;
6182 
6183 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6184 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6185 			*error = EINVAL;
6186 			return;
6187 		}
6188 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6189 			/* can only bind v6 on PF_INET6 sockets */
6190 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6191 			*error = EINVAL;
6192 			return;
6193 		}
6194 		sin6 = (struct sockaddr_in6 *)addr_touse;
6195 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6196 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6197 			    SCTP_IPV6_V6ONLY(inp)) {
6198 				/* can't bind v4-mapped on PF_INET sockets */
6199 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6200 				*error = EINVAL;
6201 				return;
6202 			}
6203 			in6_sin6_2_sin(&sin, sin6);
6204 			addr_touse = (struct sockaddr *)&sin;
6205 		}
6206 	}
6207 #endif
6208 	if (sa->sa_family == AF_INET) {
6209 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6210 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6211 			*error = EINVAL;
6212 			return;
6213 		}
6214 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6215 		    SCTP_IPV6_V6ONLY(inp)) {
6216 			/* can't bind v4 on PF_INET sockets */
6217 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6218 			*error = EINVAL;
6219 			return;
6220 		}
6221 	}
6222 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6223 		if (p == NULL) {
6224 			/* Can't get proc for Net/Open BSD */
6225 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6226 			*error = EINVAL;
6227 			return;
6228 		}
6229 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6230 		return;
6231 	}
6232 	/*
6233 	 * No locks required here since bind and mgmt_ep_sa all do their own
6234 	 * locking. If we do something for the FIX: below we may need to
6235 	 * lock in that case.
6236 	 */
6237 	if (assoc_id == 0) {
6238 		/* add the address */
6239 		struct sctp_inpcb *lep;
6240 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6241 
6242 		/* validate the incoming port */
6243 		if ((lsin->sin_port != 0) &&
6244 		    (lsin->sin_port != inp->sctp_lport)) {
6245 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6246 			*error = EINVAL;
6247 			return;
6248 		} else {
6249 			/* user specified 0 port, set it to existing port */
6250 			lsin->sin_port = inp->sctp_lport;
6251 		}
6252 
6253 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6254 		if (lep != NULL) {
6255 			/*
6256 			 * We must decrement the refcount since we have the
6257 			 * ep already and are binding. No remove going on
6258 			 * here.
6259 			 */
6260 			SCTP_INP_DECR_REF(inp);
6261 		}
6262 		if (lep == inp) {
6263 			/* already bound to it.. ok */
6264 			return;
6265 		} else if (lep == NULL) {
6266 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6267 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6268 			    SCTP_ADD_IP_ADDRESS,
6269 			    vrf_id, NULL);
6270 		} else {
6271 			*error = EADDRINUSE;
6272 		}
6273 		if (*error)
6274 			return;
6275 	} else {
6276 		/*
6277 		 * FIX: decide whether we allow assoc based bindx
6278 		 */
6279 	}
6280 }
6281 
6282 /*
6283  * sctp_bindx(DELETE) for one address.
6284  * assumes all arguments are valid/checked by caller.
6285  */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;
	struct sockaddr_in sin;

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6)
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* length must match exactly for a v6 sockaddr */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* unmap to the plain v4 form before deleting */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		/* length must match exactly for a v4 sockaddr */
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6360 
6361 /*
6362  * returns the valid local address count for an assoc, taking into account
6363  * all scoping rules
6364  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the local addresses usable by this association, applying
	 * the association's scope settings (loopback, v4-private, v6
	 * link-local and site-local) and skipping restricted addresses.
	 */
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		/* a v6 socket also accepts v4 unless it is v6-only */
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;

				if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
				    (ipv4_addr_legal)) {
					struct sockaddr_in *sin;

					sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
					if (sin->sin_addr.s_addr == 0) {
						/* skip unspecified addrs */
						continue;
					}
					if ((ipv4_local_scope == 0) &&
					    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
						continue;
					}
					/* count this one */
					count++;
				} else if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
				    (ipv6_addr_legal)) {
					struct sockaddr_in6 *sin6;

					sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
					if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
						continue;
					}
					if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
						if (local_scope == 0)
							continue;
						if (sin6->sin6_scope_id == 0) {
							if (sa6_recoverscope(sin6) != 0)
								/*
								 * bad link
								 * local
								 * address
								 */
								continue;
						}
					}
					if ((site_scope == 0) &&
					    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
						continue;
					}
					/* count this one */
					count++;
				}
			}
		}
	} else {
		/*
		 * subset bound case: count the explicitly bound addresses
		 * on the endpoint that are not restricted for this assoc
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6473 
6474 #if defined(SCTP_LOCAL_TRACE_BUF)
6475 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Record one entry in the global sctp_log ring buffer.  The slot is
	 * reserved lock-free: retry the compare-and-set on sctp_log.index
	 * until we atomically advance it, which gives this caller exclusive
	 * use of the slot at the pre-advance index.
	 */
	uint32_t saveindex, newindex;

	do {
		saveindex = sctp_log.index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			/* wrap: next writer starts over at slot 1 */
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&sctp_log.index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		/* the wrapping writer itself fills slot 0 */
		saveindex = 0;
	}
	/*
	 * Fill the reserved slot.  The writes are not atomic with the index
	 * update, so a reader racing a wrap may see a partially-written
	 * entry — acceptable for a debug trace buffer.
	 */
	sctp_log.entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	sctp_log.entry[saveindex].subsys = subsys;
	sctp_log.entry[saveindex].params[0] = a;
	sctp_log.entry[saveindex].params[1] = b;
	sctp_log.entry[saveindex].params[2] = c;
	sctp_log.entry[saveindex].params[3] = d;
	sctp_log.entry[saveindex].params[4] = e;
	sctp_log.entry[saveindex].params[5] = f;
}
6501 
6502 #endif
6503