xref: /freebsd/sys/netinet/sctputil.c (revision b28624fde638caadd4a89f50c9b7e7da0f98c4d2)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_crc32.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_cc_functions.h>
53 
54 #define NUMBER_OF_MTU_SIZES 18
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 void
62 sctp_sblog(struct sockbuf *sb,
63     struct sctp_tcb *stcb, int from, int incr)
64 {
65 	struct sctp_cwnd_log sctp_clog;
66 
67 	sctp_clog.x.sb.stcb = stcb;
68 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
69 	if (stcb)
70 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
71 	else
72 		sctp_clog.x.sb.stcb_sbcc = 0;
73 	sctp_clog.x.sb.incr = incr;
74 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
75 	    SCTP_LOG_EVENT_SB,
76 	    from,
77 	    sctp_clog.x.misc.log1,
78 	    sctp_clog.x.misc.log2,
79 	    sctp_clog.x.misc.log3,
80 	    sctp_clog.x.misc.log4);
81 }
82 
83 void
84 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
85 {
86 	struct sctp_cwnd_log sctp_clog;
87 
88 	sctp_clog.x.close.inp = (void *)inp;
89 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
90 	if (stcb) {
91 		sctp_clog.x.close.stcb = (void *)stcb;
92 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
93 	} else {
94 		sctp_clog.x.close.stcb = 0;
95 		sctp_clog.x.close.state = 0;
96 	}
97 	sctp_clog.x.close.loc = loc;
98 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
99 	    SCTP_LOG_EVENT_CLOSE,
100 	    0,
101 	    sctp_clog.x.misc.log1,
102 	    sctp_clog.x.misc.log2,
103 	    sctp_clog.x.misc.log3,
104 	    sctp_clog.x.misc.log4);
105 }
106 
107 
108 void
109 rto_logging(struct sctp_nets *net, int from)
110 {
111 	struct sctp_cwnd_log sctp_clog;
112 
113 	sctp_clog.x.rto.net = (void *)net;
114 	sctp_clog.x.rto.rtt = net->prev_rtt;
115 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
116 	    SCTP_LOG_EVENT_RTT,
117 	    from,
118 	    sctp_clog.x.misc.log1,
119 	    sctp_clog.x.misc.log2,
120 	    sctp_clog.x.misc.log3,
121 	    sctp_clog.x.misc.log4);
122 
123 }
124 
125 void
126 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
127 {
128 	struct sctp_cwnd_log sctp_clog;
129 
130 	sctp_clog.x.strlog.stcb = stcb;
131 	sctp_clog.x.strlog.n_tsn = tsn;
132 	sctp_clog.x.strlog.n_sseq = sseq;
133 	sctp_clog.x.strlog.e_tsn = 0;
134 	sctp_clog.x.strlog.e_sseq = 0;
135 	sctp_clog.x.strlog.strm = stream;
136 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
137 	    SCTP_LOG_EVENT_STRM,
138 	    from,
139 	    sctp_clog.x.misc.log1,
140 	    sctp_clog.x.misc.log2,
141 	    sctp_clog.x.misc.log3,
142 	    sctp_clog.x.misc.log4);
143 
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 
166 void
167 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
168 {
169 	struct sctp_cwnd_log sctp_clog;
170 
171 	sctp_clog.x.sack.cumack = cumack;
172 	sctp_clog.x.sack.oldcumack = old_cumack;
173 	sctp_clog.x.sack.tsn = tsn;
174 	sctp_clog.x.sack.numGaps = gaps;
175 	sctp_clog.x.sack.numDups = dups;
176 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
177 	    SCTP_LOG_EVENT_SACK,
178 	    from,
179 	    sctp_clog.x.misc.log1,
180 	    sctp_clog.x.misc.log2,
181 	    sctp_clog.x.misc.log3,
182 	    sctp_clog.x.misc.log4);
183 }
184 
185 void
186 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
187 {
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
204     int from)
205 {
206 	struct sctp_cwnd_log sctp_clog;
207 
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 
219 }
220 
221 
222 void
223 sctp_log_mb(struct mbuf *m, int from)
224 {
225 	struct sctp_cwnd_log sctp_clog;
226 
227 	sctp_clog.x.mb.mp = m;
228 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
229 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
230 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
231 	if (SCTP_BUF_IS_EXTENDED(m)) {
232 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
233 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
234 	} else {
235 		sctp_clog.x.mb.ext = 0;
236 		sctp_clog.x.mb.refcnt = 0;
237 	}
238 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
239 	    SCTP_LOG_EVENT_MBUF,
240 	    from,
241 	    sctp_clog.x.misc.log1,
242 	    sctp_clog.x.misc.log2,
243 	    sctp_clog.x.misc.log3,
244 	    sctp_clog.x.misc.log4);
245 }
246 
247 
248 void
249 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
250     int from)
251 {
252 	struct sctp_cwnd_log sctp_clog;
253 
254 	if (control == NULL) {
255 		SCTP_PRINTF("Gak log of NULL?\n");
256 		return;
257 	}
258 	sctp_clog.x.strlog.stcb = control->stcb;
259 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
260 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
261 	sctp_clog.x.strlog.strm = control->sinfo_stream;
262 	if (poschk != NULL) {
263 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
264 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
265 	} else {
266 		sctp_clog.x.strlog.e_tsn = 0;
267 		sctp_clog.x.strlog.e_sseq = 0;
268 	}
269 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
270 	    SCTP_LOG_EVENT_STRM,
271 	    from,
272 	    sctp_clog.x.misc.log1,
273 	    sctp_clog.x.misc.log2,
274 	    sctp_clog.x.misc.log3,
275 	    sctp_clog.x.misc.log4);
276 
277 }
278 
279 void
280 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
281 {
282 	struct sctp_cwnd_log sctp_clog;
283 
284 	sctp_clog.x.cwnd.net = net;
285 	if (stcb->asoc.send_queue_cnt > 255)
286 		sctp_clog.x.cwnd.cnt_in_send = 255;
287 	else
288 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
289 	if (stcb->asoc.stream_queue_cnt > 255)
290 		sctp_clog.x.cwnd.cnt_in_str = 255;
291 	else
292 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
293 
294 	if (net) {
295 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
296 		sctp_clog.x.cwnd.inflight = net->flight_size;
297 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
298 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
299 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
300 	}
301 	if (SCTP_CWNDLOG_PRESEND == from) {
302 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
303 	}
304 	sctp_clog.x.cwnd.cwnd_augment = augment;
305 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
306 	    SCTP_LOG_EVENT_CWND,
307 	    from,
308 	    sctp_clog.x.misc.log1,
309 	    sctp_clog.x.misc.log2,
310 	    sctp_clog.x.misc.log3,
311 	    sctp_clog.x.misc.log4);
312 
313 }
314 
315 void
316 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
317 {
318 	struct sctp_cwnd_log sctp_clog;
319 
320 	if (inp) {
321 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
322 
323 	} else {
324 		sctp_clog.x.lock.sock = (void *)NULL;
325 	}
326 	sctp_clog.x.lock.inp = (void *)inp;
327 	if (stcb) {
328 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
329 	} else {
330 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
331 	}
332 	if (inp) {
333 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
334 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
335 	} else {
336 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
337 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
338 	}
339 	sctp_clog.x.lock.info_lock = mtx_owned(&sctppcbinfo.ipi_ep_mtx);
340 	if (inp->sctp_socket) {
341 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
342 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
343 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
344 	} else {
345 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
346 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
347 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
348 	}
349 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
350 	    SCTP_LOG_LOCK_EVENT,
351 	    from,
352 	    sctp_clog.x.misc.log1,
353 	    sctp_clog.x.misc.log2,
354 	    sctp_clog.x.misc.log3,
355 	    sctp_clog.x.misc.log4);
356 
357 }
358 
359 void
360 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
361 {
362 	struct sctp_cwnd_log sctp_clog;
363 
364 	sctp_clog.x.cwnd.net = net;
365 	sctp_clog.x.cwnd.cwnd_new_value = error;
366 	sctp_clog.x.cwnd.inflight = net->flight_size;
367 	sctp_clog.x.cwnd.cwnd_augment = burst;
368 	if (stcb->asoc.send_queue_cnt > 255)
369 		sctp_clog.x.cwnd.cnt_in_send = 255;
370 	else
371 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
372 	if (stcb->asoc.stream_queue_cnt > 255)
373 		sctp_clog.x.cwnd.cnt_in_str = 255;
374 	else
375 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
376 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
377 	    SCTP_LOG_EVENT_MAXBURST,
378 	    from,
379 	    sctp_clog.x.misc.log1,
380 	    sctp_clog.x.misc.log2,
381 	    sctp_clog.x.misc.log3,
382 	    sctp_clog.x.misc.log4);
383 
384 }
385 
386 void
387 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
388 {
389 	struct sctp_cwnd_log sctp_clog;
390 
391 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
392 	sctp_clog.x.rwnd.send_size = snd_size;
393 	sctp_clog.x.rwnd.overhead = overhead;
394 	sctp_clog.x.rwnd.new_rwnd = 0;
395 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
396 	    SCTP_LOG_EVENT_RWND,
397 	    from,
398 	    sctp_clog.x.misc.log1,
399 	    sctp_clog.x.misc.log2,
400 	    sctp_clog.x.misc.log3,
401 	    sctp_clog.x.misc.log4);
402 }
403 
404 void
405 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
406 {
407 	struct sctp_cwnd_log sctp_clog;
408 
409 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
410 	sctp_clog.x.rwnd.send_size = flight_size;
411 	sctp_clog.x.rwnd.overhead = overhead;
412 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
413 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
414 	    SCTP_LOG_EVENT_RWND,
415 	    from,
416 	    sctp_clog.x.misc.log1,
417 	    sctp_clog.x.misc.log2,
418 	    sctp_clog.x.misc.log3,
419 	    sctp_clog.x.misc.log4);
420 }
421 
422 void
423 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
424 {
425 	struct sctp_cwnd_log sctp_clog;
426 
427 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
428 	sctp_clog.x.mbcnt.size_change = book;
429 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
430 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
431 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
432 	    SCTP_LOG_EVENT_MBCNT,
433 	    from,
434 	    sctp_clog.x.misc.log1,
435 	    sctp_clog.x.misc.log2,
436 	    sctp_clog.x.misc.log3,
437 	    sctp_clog.x.misc.log4);
438 
439 }
440 
/*
 * Trace four caller-supplied 32-bit values as a generic "misc" event;
 * the caller encodes their meaning via the 'from' location code.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
449 
450 void
451 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
452 {
453 	struct sctp_cwnd_log sctp_clog;
454 
455 	sctp_clog.x.wake.stcb = (void *)stcb;
456 	sctp_clog.x.wake.wake_cnt = wake_cnt;
457 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
458 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
459 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
460 
461 	if (stcb->asoc.stream_queue_cnt < 0xff)
462 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
463 	else
464 		sctp_clog.x.wake.stream_qcnt = 0xff;
465 
466 	if (stcb->asoc.chunks_on_out_queue < 0xff)
467 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
468 	else
469 		sctp_clog.x.wake.chunks_on_oque = 0xff;
470 
471 	sctp_clog.x.wake.sctpflags = 0;
472 	/* set in the defered mode stuff */
473 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
474 		sctp_clog.x.wake.sctpflags |= 1;
475 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
476 		sctp_clog.x.wake.sctpflags |= 2;
477 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
478 		sctp_clog.x.wake.sctpflags |= 4;
479 	/* what about the sb */
480 	if (stcb->sctp_socket) {
481 		struct socket *so = stcb->sctp_socket;
482 
483 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
484 	} else {
485 		sctp_clog.x.wake.sbflags = 0xff;
486 	}
487 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
488 	    SCTP_LOG_EVENT_WAKE,
489 	    from,
490 	    sctp_clog.x.misc.log1,
491 	    sctp_clog.x.misc.log2,
492 	    sctp_clog.x.misc.log3,
493 	    sctp_clog.x.misc.log4);
494 
495 }
496 
497 void
498 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
499 {
500 	struct sctp_cwnd_log sctp_clog;
501 
502 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
503 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
504 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
505 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
506 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
507 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
508 	sctp_clog.x.blk.sndlen = sendlen;
509 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
510 	    SCTP_LOG_EVENT_BLOCK,
511 	    from,
512 	    sctp_clog.x.misc.log1,
513 	    sctp_clog.x.misc.log2,
514 	    sctp_clog.x.misc.log3,
515 	    sctp_clog.x.misc.log4);
516 
517 }
518 
/*
 * Stub: reports success without writing to optval/optsize.  Log
 * retrieval is expected to go through the KTR facility instead.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
525 
526 #ifdef SCTP_AUDITING_ENABLED
527 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
528 static int sctp_audit_indx = 0;
529 
530 static
531 void
532 sctp_print_audit_report(void)
533 {
534 	int i;
535 	int cnt;
536 
537 	cnt = 0;
538 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
539 		if ((sctp_audit_data[i][0] == 0xe0) &&
540 		    (sctp_audit_data[i][1] == 0x01)) {
541 			cnt = 0;
542 			SCTP_PRINTF("\n");
543 		} else if (sctp_audit_data[i][0] == 0xf0) {
544 			cnt = 0;
545 			SCTP_PRINTF("\n");
546 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
547 		    (sctp_audit_data[i][1] == 0x01)) {
548 			SCTP_PRINTF("\n");
549 			cnt = 0;
550 		}
551 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
552 		    (uint32_t) sctp_audit_data[i][1]);
553 		cnt++;
554 		if ((cnt % 14) == 0)
555 			SCTP_PRINTF("\n");
556 	}
557 	for (i = 0; i < sctp_audit_indx; i++) {
558 		if ((sctp_audit_data[i][0] == 0xe0) &&
559 		    (sctp_audit_data[i][1] == 0x01)) {
560 			cnt = 0;
561 			SCTP_PRINTF("\n");
562 		} else if (sctp_audit_data[i][0] == 0xf0) {
563 			cnt = 0;
564 			SCTP_PRINTF("\n");
565 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
566 		    (sctp_audit_data[i][1] == 0x01)) {
567 			SCTP_PRINTF("\n");
568 			cnt = 0;
569 		}
570 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
571 		    (uint32_t) sctp_audit_data[i][1]);
572 		cnt++;
573 		if ((cnt % 14) == 0)
574 			SCTP_PRINTF("\n");
575 	}
576 	SCTP_PRINTF("\n");
577 }
578 
579 void
580 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
581     struct sctp_nets *net)
582 {
583 	int resend_cnt, tot_out, rep, tot_book_cnt;
584 	struct sctp_nets *lnet;
585 	struct sctp_tmit_chunk *chk;
586 
587 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
588 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
589 	sctp_audit_indx++;
590 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
591 		sctp_audit_indx = 0;
592 	}
593 	if (inp == NULL) {
594 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
595 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
596 		sctp_audit_indx++;
597 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
598 			sctp_audit_indx = 0;
599 		}
600 		return;
601 	}
602 	if (stcb == NULL) {
603 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
604 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
605 		sctp_audit_indx++;
606 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
607 			sctp_audit_indx = 0;
608 		}
609 		return;
610 	}
611 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
612 	sctp_audit_data[sctp_audit_indx][1] =
613 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
614 	sctp_audit_indx++;
615 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
616 		sctp_audit_indx = 0;
617 	}
618 	rep = 0;
619 	tot_book_cnt = 0;
620 	resend_cnt = tot_out = 0;
621 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
622 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
623 			resend_cnt++;
624 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
625 			tot_out += chk->book_size;
626 			tot_book_cnt++;
627 		}
628 	}
629 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
630 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
631 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
632 		sctp_audit_indx++;
633 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
634 			sctp_audit_indx = 0;
635 		}
636 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
637 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
638 		rep = 1;
639 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
640 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
641 		sctp_audit_data[sctp_audit_indx][1] =
642 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
643 		sctp_audit_indx++;
644 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
645 			sctp_audit_indx = 0;
646 		}
647 	}
648 	if (tot_out != stcb->asoc.total_flight) {
649 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
650 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
651 		sctp_audit_indx++;
652 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
653 			sctp_audit_indx = 0;
654 		}
655 		rep = 1;
656 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
657 		    (int)stcb->asoc.total_flight);
658 		stcb->asoc.total_flight = tot_out;
659 	}
660 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
661 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
662 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
663 		sctp_audit_indx++;
664 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
665 			sctp_audit_indx = 0;
666 		}
667 		rep = 1;
668 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
669 
670 		stcb->asoc.total_flight_count = tot_book_cnt;
671 	}
672 	tot_out = 0;
673 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
674 		tot_out += lnet->flight_size;
675 	}
676 	if (tot_out != stcb->asoc.total_flight) {
677 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
678 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
679 		sctp_audit_indx++;
680 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
681 			sctp_audit_indx = 0;
682 		}
683 		rep = 1;
684 		SCTP_PRINTF("real flight:%d net total was %d\n",
685 		    stcb->asoc.total_flight, tot_out);
686 		/* now corrective action */
687 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
688 
689 			tot_out = 0;
690 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
691 				if ((chk->whoTo == lnet) &&
692 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
693 					tot_out += chk->book_size;
694 				}
695 			}
696 			if (lnet->flight_size != tot_out) {
697 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
698 				    (uint32_t) lnet, lnet->flight_size,
699 				    tot_out);
700 				lnet->flight_size = tot_out;
701 			}
702 		}
703 	}
704 	if (rep) {
705 		sctp_print_audit_report();
706 	}
707 }
708 
709 void
710 sctp_audit_log(uint8_t ev, uint8_t fd)
711 {
712 
713 	sctp_audit_data[sctp_audit_indx][0] = ev;
714 	sctp_audit_data[sctp_audit_indx][1] = fd;
715 	sctp_audit_indx++;
716 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
717 		sctp_audit_indx = 0;
718 	}
719 }
720 
721 #endif
722 
723 /*
724  * a list of sizes based on typical mtu's, used only if next hop size not
725  * returned.
726  */
static int sctp_mtu_sizes[] = {	/* ascending order; the 18 entries here
				 * must stay in sync with
				 * NUMBER_OF_MTU_SIZES */
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
747 
748 void
749 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
750 {
751 	struct sctp_association *asoc;
752 	struct sctp_nets *net;
753 
754 	asoc = &stcb->asoc;
755 
756 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
757 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
758 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
759 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
760 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
761 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
762 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
763 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
764 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
765 	}
766 }
767 
768 int
769 find_next_best_mtu(int totsz)
770 {
771 	int i, perfer;
772 
773 	/*
774 	 * if we are in here we must find the next best fit based on the
775 	 * size of the dg that failed to be sent.
776 	 */
777 	perfer = 0;
778 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
779 		if (totsz < sctp_mtu_sizes[i]) {
780 			perfer = i - 1;
781 			if (perfer < 0)
782 				perfer = 0;
783 			break;
784 		}
785 	}
786 	return (sctp_mtu_sizes[perfer]);
787 }
788 
/*
 * Refill m->random_store by HMACing the endpoint's random numbers with
 * a monotonically increasing counter, then rewind the read offset.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
807 
/*
 * Hand out the next 32-bit value from the endpoint's random store,
 * refilling the store when it wraps.  Lock-free: concurrent callers
 * race via a compare-and-swap on the store offset.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	/* Debug hook: a non-zero seed yields a simple increasing sequence. */
	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/*
	 * Claim a 4-byte slot: advance the offset with CAS so no two
	 * callers consume the same slot; retry on CAS failure.
	 */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
845 
846 uint32_t
847 sctp_select_a_tag(struct sctp_inpcb *inp)
848 {
849 	u_long x, not_done;
850 	struct timeval now;
851 
852 	(void)SCTP_GETTIME_TIMEVAL(&now);
853 	not_done = 1;
854 	while (not_done) {
855 		x = sctp_select_initial_TSN(&inp->sctp_ep);
856 		if (x == 0) {
857 			/* we never use 0 */
858 			continue;
859 		}
860 		if (sctp_is_vtag_good(inp, x, &now)) {
861 			not_done = 0;
862 		}
863 	}
864 	return (x);
865 }
866 
867 int
868 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
869     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
870 {
871 	struct sctp_association *asoc;
872 
873 	/*
874 	 * Anything set to zero is taken care of by the allocation routine's
875 	 * bzero
876 	 */
877 
878 	/*
879 	 * Up front select what scoping to apply on addresses I tell my peer
880 	 * Not sure what to do with these right now, we will need to come up
881 	 * with a way to set them. We may need to pass them through from the
882 	 * caller in the sctp_aloc_assoc() function.
883 	 */
884 	int i;
885 
886 	asoc = &stcb->asoc;
887 	/* init all variables to a known value. */
888 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
889 	asoc->max_burst = m->sctp_ep.max_burst;
890 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
891 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
892 	asoc->sctp_cmt_on_off = (uint8_t) sctp_cmt_on_off;
893 	/* JRS 5/21/07 - Init CMT PF variables */
894 	asoc->sctp_cmt_pf = (uint8_t) sctp_cmt_pf;
895 	asoc->sctp_frag_point = m->sctp_frag_point;
896 #ifdef INET
897 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
898 #else
899 	asoc->default_tos = 0;
900 #endif
901 
902 #ifdef INET6
903 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
904 #else
905 	asoc->default_flowlabel = 0;
906 #endif
907 	if (override_tag) {
908 		struct timeval now;
909 
910 		(void)SCTP_GETTIME_TIMEVAL(&now);
911 		if (sctp_is_vtag_good(m, override_tag, &now)) {
912 			asoc->my_vtag = override_tag;
913 		} else {
914 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
915 			return (ENOMEM);
916 		}
917 
918 	} else {
919 		asoc->my_vtag = sctp_select_a_tag(m);
920 	}
921 	/* Get the nonce tags */
922 	asoc->my_vtag_nonce = sctp_select_a_tag(m);
923 	asoc->peer_vtag_nonce = sctp_select_a_tag(m);
924 	asoc->vrf_id = vrf_id;
925 
926 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
927 		asoc->hb_is_disabled = 1;
928 	else
929 		asoc->hb_is_disabled = 0;
930 
931 #ifdef SCTP_ASOCLOG_OF_TSNS
932 	asoc->tsn_in_at = 0;
933 	asoc->tsn_out_at = 0;
934 	asoc->tsn_in_wrapped = 0;
935 	asoc->tsn_out_wrapped = 0;
936 	asoc->cumack_log_at = 0;
937 #endif
938 #ifdef SCTP_FS_SPEC_LOG
939 	asoc->fs_index = 0;
940 #endif
941 	asoc->refcnt = 0;
942 	asoc->assoc_up_sent = 0;
943 	asoc->assoc_id = asoc->my_vtag;
944 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
945 	    sctp_select_initial_TSN(&m->sctp_ep);
946 	/* we are optimisitic here */
947 	asoc->peer_supports_pktdrop = 1;
948 
949 	asoc->sent_queue_retran_cnt = 0;
950 
951 	/* for CMT */
952 	asoc->last_net_data_came_from = NULL;
953 
954 	/* This will need to be adjusted */
955 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
956 	asoc->last_acked_seq = asoc->init_seq_number - 1;
957 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
958 	asoc->asconf_seq_in = asoc->last_acked_seq;
959 
960 	/* here we are different, we hold the next one we expect */
961 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
962 
963 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
964 	asoc->initial_rto = m->sctp_ep.initial_rto;
965 
966 	asoc->max_init_times = m->sctp_ep.max_init_times;
967 	asoc->max_send_times = m->sctp_ep.max_send_times;
968 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
969 	asoc->free_chunk_cnt = 0;
970 
971 	asoc->iam_blocking = 0;
972 	/* ECN Nonce initialization */
973 	asoc->context = m->sctp_context;
974 	asoc->def_send = m->def_send;
975 	asoc->ecn_nonce_allowed = 0;
976 	asoc->receiver_nonce_sum = 1;
977 	asoc->nonce_sum_expect_base = 1;
978 	asoc->nonce_sum_check = 1;
979 	asoc->nonce_resync_tsn = 0;
980 	asoc->nonce_wait_for_ecne = 0;
981 	asoc->nonce_wait_tsn = 0;
982 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
983 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
984 	asoc->pr_sctp_cnt = 0;
985 	asoc->total_output_queue_size = 0;
986 
987 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
988 		struct in6pcb *inp6;
989 
990 		/* Its a V6 socket */
991 		inp6 = (struct in6pcb *)m;
992 		asoc->ipv6_addr_legal = 1;
993 		/* Now look at the binding flag to see if V4 will be legal */
994 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
995 			asoc->ipv4_addr_legal = 1;
996 		} else {
997 			/* V4 addresses are NOT legal on the association */
998 			asoc->ipv4_addr_legal = 0;
999 		}
1000 	} else {
1001 		/* Its a V4 socket, no - V6 */
1002 		asoc->ipv4_addr_legal = 1;
1003 		asoc->ipv6_addr_legal = 0;
1004 	}
1005 
1006 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1007 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1008 
1009 	asoc->smallest_mtu = m->sctp_frag_point;
1010 #ifdef SCTP_PRINT_FOR_B_AND_M
1011 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1012 	    asoc->smallest_mtu);
1013 #endif
1014 	asoc->minrto = m->sctp_ep.sctp_minrto;
1015 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1016 
1017 	asoc->locked_on_sending = NULL;
1018 	asoc->stream_locked_on = 0;
1019 	asoc->ecn_echo_cnt_onq = 0;
1020 	asoc->stream_locked = 0;
1021 
1022 	asoc->send_sack = 1;
1023 
1024 	LIST_INIT(&asoc->sctp_restricted_addrs);
1025 
1026 	TAILQ_INIT(&asoc->nets);
1027 	TAILQ_INIT(&asoc->pending_reply_queue);
1028 	TAILQ_INIT(&asoc->asconf_ack_sent);
1029 	/* Setup to fill the hb random cache at first HB */
1030 	asoc->hb_random_idx = 4;
1031 
1032 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1033 
1034 	/*
1035 	 * JRS - Pick the default congestion control module based on the
1036 	 * sysctl.
1037 	 */
1038 	switch (m->sctp_ep.sctp_default_cc_module) {
1039 		/* JRS - Standard TCP congestion control */
1040 	case SCTP_CC_RFC2581:
1041 		{
1042 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1043 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1044 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1045 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1046 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1047 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1048 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1049 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1050 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1051 			break;
1052 		}
1053 		/* JRS - High Speed TCP congestion control (Floyd) */
1054 	case SCTP_CC_HSTCP:
1055 		{
1056 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1057 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1058 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1059 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1060 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1061 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1063 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1064 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1065 			break;
1066 		}
1067 		/* JRS - HTCP congestion control */
1068 	case SCTP_CC_HTCP:
1069 		{
1070 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1071 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1072 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1073 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1074 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1075 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1077 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1078 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1079 			break;
1080 		}
1081 		/* JRS - By default, use RFC2581 */
1082 	default:
1083 		{
1084 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1085 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1086 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1087 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1088 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1089 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1091 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1092 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1093 			break;
1094 		}
1095 	}
1096 
1097 	/*
1098 	 * Now the stream parameters, here we allocate space for all streams
1099 	 * that we request by default.
1100 	 */
1101 	asoc->streamoutcnt = asoc->pre_open_streams =
1102 	    m->sctp_ep.pre_open_stream_count;
1103 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1104 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1105 	    SCTP_M_STRMO);
1106 	if (asoc->strmout == NULL) {
1107 		/* big trouble no memory */
1108 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1109 		return (ENOMEM);
1110 	}
1111 	for (i = 0; i < asoc->streamoutcnt; i++) {
1112 		/*
1113 		 * inbound side must be set to 0xffff, also NOTE when we get
1114 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1115 		 * count (streamoutcnt) but first check if we sent to any of
1116 		 * the upper streams that were dropped (if some were). Those
1117 		 * that were dropped must be notified to the upper layer as
1118 		 * failed to send.
1119 		 */
1120 		asoc->strmout[i].next_sequence_sent = 0x0;
1121 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1122 		asoc->strmout[i].stream_no = i;
1123 		asoc->strmout[i].last_msg_incomplete = 0;
1124 		asoc->strmout[i].next_spoke.tqe_next = 0;
1125 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1126 	}
1127 	/* Now the mapping array */
1128 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1129 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1130 	    SCTP_M_MAP);
1131 	if (asoc->mapping_array == NULL) {
1132 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1133 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1134 		return (ENOMEM);
1135 	}
1136 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1137 	/* Now the init of the other outqueues */
1138 	TAILQ_INIT(&asoc->free_chunks);
1139 	TAILQ_INIT(&asoc->out_wheel);
1140 	TAILQ_INIT(&asoc->control_send_queue);
1141 	TAILQ_INIT(&asoc->send_queue);
1142 	TAILQ_INIT(&asoc->sent_queue);
1143 	TAILQ_INIT(&asoc->reasmqueue);
1144 	TAILQ_INIT(&asoc->resetHead);
1145 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1146 	TAILQ_INIT(&asoc->asconf_queue);
1147 	/* authentication fields */
1148 	asoc->authinfo.random = NULL;
1149 	asoc->authinfo.assoc_key = NULL;
1150 	asoc->authinfo.assoc_keyid = 0;
1151 	asoc->authinfo.recv_key = NULL;
1152 	asoc->authinfo.recv_keyid = 0;
1153 	LIST_INIT(&asoc->shared_keys);
1154 	asoc->marked_retrans = 0;
1155 	asoc->timoinit = 0;
1156 	asoc->timodata = 0;
1157 	asoc->timosack = 0;
1158 	asoc->timoshutdown = 0;
1159 	asoc->timoheartbeat = 0;
1160 	asoc->timocookie = 0;
1161 	asoc->timoshutdownack = 0;
1162 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1163 	asoc->discontinuity_time = asoc->start_time;
1164 	/*
1165 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1166 	 * freed later whe the association is freed.
1167 	 */
1168 	return (0);
1169 }
1170 
1171 int
1172 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1173 {
1174 	/* mapping array needs to grow */
1175 	uint8_t *new_array;
1176 	uint32_t new_size;
1177 
1178 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1179 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1180 	if (new_array == NULL) {
1181 		/* can't get more, forget it */
1182 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1183 		    new_size);
1184 		return (-1);
1185 	}
1186 	memset(new_array, 0, new_size);
1187 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1188 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1189 	asoc->mapping_array = new_array;
1190 	asoc->mapping_array_size = new_size;
1191 	return (0);
1192 }
1193 
1194 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Core of the PCB iterator: walk the endpoint list starting at it->inp,
 * skipping endpoints whose flags/features do not match the iterator's
 * filters, and for each matching endpoint run the optional per-endpoint
 * callback (function_inp), the per-association callback (function_assoc)
 * on every association in the desired state, and the optional endpoint-end
 * callback (function_inp_end).  When the walk finishes, the at-end
 * callback fires and the iterator structure itself is freed here.
 *
 * Locking: runs with the global ITERATOR lock held for the whole walk;
 * per-endpoint INP and per-association TCB locks are taken and dropped as
 * the walk proceeds.  Every SCTP_ITERATOR_MAX_AT_ONCE associations the
 * locks are deliberately dropped and reacquired to let other threads in
 * (the asoc refcnt is bumped across that window to keep the stcb alive).
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	/* Drop the reference the caller took on the starting endpoint. */
	if (it->inp) {
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* The iterator owns itself; free it on completion. */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/*
	 * Advance past endpoints whose pcb flags/features do not match the
	 * iterator's filter masks (all requested bits must be set).
	 */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/* Downgrade: the association walk only needs the read lock. */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	/*
	 * A non-zero return from function_inp means "skip this endpoint's
	 * associations"; an empty asoc list means the same thing.
	 */
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			/*
			 * Hold a ref on the endpoint too across the window
			 * where all locks are released and reacquired.
			 */
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	/*
	 * NOTE(review): this WLOCK/WUNLOCK pair appears to act only as a
	 * barrier against concurrent writers before advancing -- confirm.
	 */
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1321 
1322 void
1323 sctp_iterator_worker(void)
1324 {
1325 	struct sctp_iterator *it = NULL;
1326 
1327 	/* This function is called with the WQ lock in place */
1328 
1329 	sctppcbinfo.iterator_running = 1;
1330 again:
1331 	it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
1332 	while (it) {
1333 		/* now lets work on this one */
1334 		TAILQ_REMOVE(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
1335 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1336 		sctp_iterator_work(it);
1337 		SCTP_IPI_ITERATOR_WQ_LOCK();
1338 		/* sa_ignore FREED_MEMORY */
1339 		it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
1340 	}
1341 	if (TAILQ_FIRST(&sctppcbinfo.iteratorhead)) {
1342 		goto again;
1343 	}
1344 	sctppcbinfo.iterator_running = 0;
1345 	return;
1346 }
1347 
1348 #endif
1349 
1350 
1351 static void
1352 sctp_handle_addr_wq(void)
1353 {
1354 	/* deal with the ADDR wq from the rtsock calls */
1355 	struct sctp_laddr *wi;
1356 	struct sctp_asconf_iterator *asc;
1357 
1358 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1359 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1360 	if (asc == NULL) {
1361 		/* Try later, no memory */
1362 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1363 		    (struct sctp_inpcb *)NULL,
1364 		    (struct sctp_tcb *)NULL,
1365 		    (struct sctp_nets *)NULL);
1366 		return;
1367 	}
1368 	LIST_INIT(&asc->list_of_work);
1369 	asc->cnt = 0;
1370 	SCTP_IPI_ITERATOR_WQ_LOCK();
1371 	wi = LIST_FIRST(&sctppcbinfo.addr_wq);
1372 	while (wi != NULL) {
1373 		LIST_REMOVE(wi, sctp_nxt_addr);
1374 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1375 		asc->cnt++;
1376 		wi = LIST_FIRST(&sctppcbinfo.addr_wq);
1377 	}
1378 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1379 	if (asc->cnt == 0) {
1380 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1381 	} else {
1382 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1383 		    sctp_asconf_iterator_stcb,
1384 		    NULL,	/* No ep end for boundall */
1385 		    SCTP_PCB_FLAGS_BOUNDALL,
1386 		    SCTP_PCB_ANY_FEATURES,
1387 		    SCTP_ASOC_ANY_STATE,
1388 		    (void *)asc, 0,
1389 		    sctp_asconf_iterator_end, NULL, 0);
1390 	}
1391 }
1392 
/*
 * Scratch results from the most recent T3 send-timer expiry in
 * sctp_timeout_handler() (t3rxt return code and the overall error count
 * sampled before the retransmit).
 *
 * NOTE(review): these look like they should be function locals (or at
 * least static) -- as writable externally-visible globals they are shared
 * across concurrent timer expiries without any lock; confirm no other
 * compilation unit references them before narrowing the linkage.
 */
int retcode = 0;
int cur_oerr = 0;
1395 
/*
 * Callout entry point for every SCTP timer.  't' is the sctp_timer that
 * fired; its ep/tcb/net fields are recovered into inp/stcb/net here.
 *
 * The long prologue validates the timer (self pointer, type range),
 * takes a reference on the endpoint and association, and bails out if
 * either is already being torn down; tmr->stopped_from is updated at
 * each stage purely as a debugging breadcrumb of where we gave up.
 * After the prologue the TCB lock is held (when stcb != NULL) and the
 * big switch dispatches to the per-timer-type handler.  Handlers that
 * destroy the tcb/inp jump to out_decr/out_no_decr so the normal
 * unlock/deref epilogue is skipped appropriately.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output;
	struct sctp_iterator *it = NULL;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Only the ADDR_WQ timer may legitimately fire with no endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
		it = (struct sctp_iterator *)inp;
		inp = NULL;
	}
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * A socket-less endpoint is being closed; only the
		 * teardown-related timers are still allowed to run on it.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association while we check its state. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/* Lock held now; the temporary refcount can be dropped. */
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		SCTP_STAT_INCR(sctps_timoiterator);
		sctp_iterator_timer(it);
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			int abort_flag;

			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
				sctp_sack_check(stcb, 0, 0, &abort_flag);
			sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* Count reachable-but-unconfirmed destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf == 0) {
				/* lnet is NULL here (loop ran to the end) */
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* Shutdown took too long; abort the association. */
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_delete_prim_timer(inp, stcb, net)) {
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: take the socket lock while the tcb is
		 * pinned by a refcount, then re-take the tcb lock.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_DIRECTLY_NOCMPSET);
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* Normal epilogue: release the tcb lock and the endpoint ref. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    tmr->type);
	/*
	 * NOTE(review): the empty if-body below is dead code (leftover
	 * statement?) -- candidate for removal.
	 */
	if (inp) {
	}
}
1863 
1864 void
1865 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1866     struct sctp_nets *net)
1867 {
1868 	int to_ticks;
1869 	struct sctp_timer *tmr;
1870 
1871 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1872 		return;
1873 
1874 	to_ticks = 0;
1875 
1876 	tmr = NULL;
1877 	if (stcb) {
1878 		SCTP_TCB_LOCK_ASSERT(stcb);
1879 	}
1880 	switch (t_type) {
1881 	case SCTP_TIMER_TYPE_ZERO_COPY:
1882 		tmr = &inp->sctp_ep.zero_copy_timer;
1883 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1884 		break;
1885 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1886 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1887 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1888 		break;
1889 	case SCTP_TIMER_TYPE_ADDR_WQ:
1890 		/* Only 1 tick away :-) */
1891 		tmr = &sctppcbinfo.addr_wq_timer;
1892 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1893 		break;
1894 	case SCTP_TIMER_TYPE_ITERATOR:
1895 		{
1896 			struct sctp_iterator *it;
1897 
1898 			it = (struct sctp_iterator *)inp;
1899 			tmr = &it->tmr;
1900 			to_ticks = SCTP_ITERATOR_TICKS;
1901 		}
1902 		break;
1903 	case SCTP_TIMER_TYPE_SEND:
1904 		/* Here we use the RTO timer */
1905 		{
1906 			int rto_val;
1907 
1908 			if ((stcb == NULL) || (net == NULL)) {
1909 				return;
1910 			}
1911 			tmr = &net->rxt_timer;
1912 			if (net->RTO == 0) {
1913 				rto_val = stcb->asoc.initial_rto;
1914 			} else {
1915 				rto_val = net->RTO;
1916 			}
1917 			to_ticks = MSEC_TO_TICKS(rto_val);
1918 		}
1919 		break;
1920 	case SCTP_TIMER_TYPE_INIT:
1921 		/*
1922 		 * Here we use the INIT timer default usually about 1
1923 		 * minute.
1924 		 */
1925 		if ((stcb == NULL) || (net == NULL)) {
1926 			return;
1927 		}
1928 		tmr = &net->rxt_timer;
1929 		if (net->RTO == 0) {
1930 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1931 		} else {
1932 			to_ticks = MSEC_TO_TICKS(net->RTO);
1933 		}
1934 		break;
1935 	case SCTP_TIMER_TYPE_RECV:
1936 		/*
1937 		 * Here we use the Delayed-Ack timer value from the inp
1938 		 * ususually about 200ms.
1939 		 */
1940 		if (stcb == NULL) {
1941 			return;
1942 		}
1943 		tmr = &stcb->asoc.dack_timer;
1944 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1945 		break;
1946 	case SCTP_TIMER_TYPE_SHUTDOWN:
1947 		/* Here we use the RTO of the destination. */
1948 		if ((stcb == NULL) || (net == NULL)) {
1949 			return;
1950 		}
1951 		if (net->RTO == 0) {
1952 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1953 		} else {
1954 			to_ticks = MSEC_TO_TICKS(net->RTO);
1955 		}
1956 		tmr = &net->rxt_timer;
1957 		break;
1958 	case SCTP_TIMER_TYPE_HEARTBEAT:
1959 		/*
1960 		 * the net is used here so that we can add in the RTO. Even
1961 		 * though we use a different timer. We also add the HB timer
1962 		 * PLUS a random jitter.
1963 		 */
1964 		if ((inp == NULL) || (stcb == NULL)) {
1965 			return;
1966 		} else {
1967 			uint32_t rndval;
1968 			uint8_t this_random;
1969 			int cnt_of_unconf = 0;
1970 			struct sctp_nets *lnet;
1971 
1972 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1973 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1974 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1975 					cnt_of_unconf++;
1976 				}
1977 			}
1978 			if (cnt_of_unconf) {
1979 				net = lnet = NULL;
1980 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
1981 			}
1982 			if (stcb->asoc.hb_random_idx > 3) {
1983 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1984 				memcpy(stcb->asoc.hb_random_values, &rndval,
1985 				    sizeof(stcb->asoc.hb_random_values));
1986 				stcb->asoc.hb_random_idx = 0;
1987 			}
1988 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
1989 			stcb->asoc.hb_random_idx++;
1990 			stcb->asoc.hb_ect_randombit = 0;
1991 			/*
1992 			 * this_random will be 0 - 256 ms RTO is in ms.
1993 			 */
1994 			if ((stcb->asoc.hb_is_disabled) &&
1995 			    (cnt_of_unconf == 0)) {
1996 				return;
1997 			}
1998 			if (net) {
1999 				int delay;
2000 
2001 				delay = stcb->asoc.heart_beat_delay;
2002 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2003 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2004 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2005 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2006 						delay = 0;
2007 					}
2008 				}
2009 				if (net->RTO == 0) {
2010 					/* Never been checked */
2011 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2012 				} else {
2013 					/* set rto_val to the ms */
2014 					to_ticks = delay + net->RTO + this_random;
2015 				}
2016 			} else {
2017 				if (cnt_of_unconf) {
2018 					to_ticks = this_random + stcb->asoc.initial_rto;
2019 				} else {
2020 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2021 				}
2022 			}
2023 			/*
2024 			 * Now we must convert the to_ticks that are now in
2025 			 * ms to ticks.
2026 			 */
2027 			to_ticks = MSEC_TO_TICKS(to_ticks);
2028 			tmr = &stcb->asoc.hb_timer;
2029 		}
2030 		break;
2031 	case SCTP_TIMER_TYPE_COOKIE:
2032 		/*
2033 		 * Here we can use the RTO timer from the network since one
2034 		 * RTT was compelete. If a retran happened then we will be
2035 		 * using the RTO initial value.
2036 		 */
2037 		if ((stcb == NULL) || (net == NULL)) {
2038 			return;
2039 		}
2040 		if (net->RTO == 0) {
2041 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2042 		} else {
2043 			to_ticks = MSEC_TO_TICKS(net->RTO);
2044 		}
2045 		tmr = &net->rxt_timer;
2046 		break;
2047 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2048 		/*
2049 		 * nothing needed but the endpoint here ususually about 60
2050 		 * minutes.
2051 		 */
2052 		if (inp == NULL) {
2053 			return;
2054 		}
2055 		tmr = &inp->sctp_ep.signature_change;
2056 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2057 		break;
2058 	case SCTP_TIMER_TYPE_ASOCKILL:
2059 		if (stcb == NULL) {
2060 			return;
2061 		}
2062 		tmr = &stcb->asoc.strreset_timer;
2063 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2064 		break;
2065 	case SCTP_TIMER_TYPE_INPKILL:
2066 		/*
2067 		 * The inp is setup to die. We re-use the signature_chage
2068 		 * timer since that has stopped and we are in the GONE
2069 		 * state.
2070 		 */
2071 		if (inp == NULL) {
2072 			return;
2073 		}
2074 		tmr = &inp->sctp_ep.signature_change;
2075 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2076 		break;
2077 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2078 		/*
2079 		 * Here we use the value found in the EP for PMTU ususually
2080 		 * about 10 minutes.
2081 		 */
2082 		if ((stcb == NULL) || (inp == NULL)) {
2083 			return;
2084 		}
2085 		if (net == NULL) {
2086 			return;
2087 		}
2088 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2089 		tmr = &net->pmtu_timer;
2090 		break;
2091 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2092 		/* Here we use the RTO of the destination */
2093 		if ((stcb == NULL) || (net == NULL)) {
2094 			return;
2095 		}
2096 		if (net->RTO == 0) {
2097 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2098 		} else {
2099 			to_ticks = MSEC_TO_TICKS(net->RTO);
2100 		}
2101 		tmr = &net->rxt_timer;
2102 		break;
2103 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2104 		/*
2105 		 * Here we use the endpoints shutdown guard timer usually
2106 		 * about 3 minutes.
2107 		 */
2108 		if ((inp == NULL) || (stcb == NULL)) {
2109 			return;
2110 		}
2111 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2112 		tmr = &stcb->asoc.shut_guard_timer;
2113 		break;
2114 	case SCTP_TIMER_TYPE_STRRESET:
2115 		/*
2116 		 * Here the timer comes from the stcb but its value is from
2117 		 * the net's RTO.
2118 		 */
2119 		if ((stcb == NULL) || (net == NULL)) {
2120 			return;
2121 		}
2122 		if (net->RTO == 0) {
2123 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2124 		} else {
2125 			to_ticks = MSEC_TO_TICKS(net->RTO);
2126 		}
2127 		tmr = &stcb->asoc.strreset_timer;
2128 		break;
2129 
2130 	case SCTP_TIMER_TYPE_EARLYFR:
2131 		{
2132 			unsigned int msec;
2133 
2134 			if ((stcb == NULL) || (net == NULL)) {
2135 				return;
2136 			}
2137 			if (net->flight_size > net->cwnd) {
2138 				/* no need to start */
2139 				return;
2140 			}
2141 			SCTP_STAT_INCR(sctps_earlyfrstart);
2142 			if (net->lastsa == 0) {
2143 				/* Hmm no rtt estimate yet? */
2144 				msec = stcb->asoc.initial_rto >> 2;
2145 			} else {
2146 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2147 			}
2148 			if (msec < sctp_early_fr_msec) {
2149 				msec = sctp_early_fr_msec;
2150 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2151 					msec = SCTP_MINFR_MSEC_FLOOR;
2152 				}
2153 			}
2154 			to_ticks = MSEC_TO_TICKS(msec);
2155 			tmr = &net->fr_timer;
2156 		}
2157 		break;
2158 	case SCTP_TIMER_TYPE_ASCONF:
2159 		/*
2160 		 * Here the timer comes from the stcb but its value is from
2161 		 * the net's RTO.
2162 		 */
2163 		if ((stcb == NULL) || (net == NULL)) {
2164 			return;
2165 		}
2166 		if (net->RTO == 0) {
2167 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2168 		} else {
2169 			to_ticks = MSEC_TO_TICKS(net->RTO);
2170 		}
2171 		tmr = &stcb->asoc.asconf_timer;
2172 		break;
2173 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2174 		if ((stcb == NULL) || (net != NULL)) {
2175 			return;
2176 		}
2177 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2178 		tmr = &stcb->asoc.delete_prim_timer;
2179 		break;
2180 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2181 		if (stcb == NULL) {
2182 			return;
2183 		}
2184 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2185 			/*
2186 			 * Really an error since stcb is NOT set to
2187 			 * autoclose
2188 			 */
2189 			return;
2190 		}
2191 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2192 		tmr = &stcb->asoc.autoclose_timer;
2193 		break;
2194 	default:
2195 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2196 		    __FUNCTION__, t_type);
2197 		return;
2198 		break;
2199 	};
2200 	if ((to_ticks <= 0) || (tmr == NULL)) {
2201 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2202 		    __FUNCTION__, t_type, to_ticks, tmr);
2203 		return;
2204 	}
2205 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2206 		/*
2207 		 * we do NOT allow you to have it already running. if it is
2208 		 * we leave the current one up unchanged
2209 		 */
2210 		return;
2211 	}
2212 	/* At this point we can proceed */
2213 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2214 		stcb->asoc.num_send_timers_up++;
2215 	}
2216 	tmr->stopped_from = 0;
2217 	tmr->type = t_type;
2218 	tmr->ep = (void *)inp;
2219 	tmr->tcb = (void *)stcb;
2220 	tmr->net = (void *)net;
2221 	tmr->self = (void *)tmr;
2222 	tmr->ticks = sctp_get_tick_count();
2223 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2224 	return;
2225 }
2226 
/*
 * Stop the timer of type t_type that is attached to inp/stcb/net.
 * 'from' identifies the call site and is recorded in tmr->stopped_from
 * for debugging.  Because several logical timers share one sctp_timer
 * structure, the timer is only stopped when the type recorded in it
 * matches t_type.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* every type except ADDR_WQ requires an endpoint */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* map the timer type to the sctp_timer instance that carries it */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* address work-queue timer lives in the global pcb info */
		tmr = &sctppcbinfo.addr_wq_timer;
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			struct sctp_iterator *it;

			/* the iterator passes itself in place of the inp */
			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* INIT shares the per-destination retransmit timer */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer (shares the strreset timer).
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* SEND timers are counted per association; keep the count sane */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2398 
#ifdef SCTP_USE_ADLER32
/*
 * Incrementally extend an Adler-32 checksum over buf[0..len-1] and
 * return the updated checksum.  The low 16 bits of 'adler' hold the
 * running byte sum (s1), the high 16 bits hold the running sum-of-sums
 * (s2); both are kept reduced modulo SCTP_ADLER32_BASE.
 */
static uint32_t
update_adler32(uint32_t adler, uint8_t * buf, int32_t len)
{
	uint32_t lo = adler & 0xffff;		/* s1 component */
	uint32_t hi = (adler >> 16) & 0xffff;	/* s2 component */
	int32_t i;

	for (i = 0; i < len; i++) {
		/*
		 * Before reduction s1 < BASE + 255 and s2 < 2*BASE, so a
		 * single conditional subtract is exactly equivalent to
		 * the modulo and avoids a divide on every byte.
		 */
		lo += buf[i];
		if (lo >= SCTP_ADLER32_BASE) {
			lo -= SCTP_ADLER32_BASE;
		}
		hi += lo;
		if (hi >= SCTP_ADLER32_BASE) {
			hi -= SCTP_ADLER32_BASE;
		}
	}
	/* Recombine: s2 in the high word, s1 in the low word. */
	return ((hi << 16) + lo);
}

#endif
2439 
2440 
2441 uint32_t
2442 sctp_calculate_len(struct mbuf *m)
2443 {
2444 	uint32_t tlen = 0;
2445 	struct mbuf *at;
2446 
2447 	at = m;
2448 	while (at) {
2449 		tlen += SCTP_BUF_LEN(at);
2450 		at = SCTP_BUF_NEXT(at);
2451 	}
2452 	return (tlen);
2453 }
2454 
2455 #if defined(SCTP_WITH_NO_CSUM)
2456 
2457 uint32_t
2458 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2459 {
2460 	/*
2461 	 * given a mbuf chain with a packetheader offset by 'offset'
2462 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2463 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2464 	 * has a side bonus as it will calculate the total length of the
2465 	 * mbuf chain. Note: if offset is greater than the total mbuf
2466 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2467 	 */
2468 	if (pktlen == NULL)
2469 		return (0);
2470 	*pktlen = sctp_calculate_len(m);
2471 	return (0);
2472 }
2473 
2474 #elif defined(SCTP_USE_INCHKSUM)
2475 
2476 #include <machine/in_cksum.h>
2477 
2478 uint32_t
2479 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2480 {
2481 	/*
2482 	 * given a mbuf chain with a packetheader offset by 'offset'
2483 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2484 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2485 	 * has a side bonus as it will calculate the total length of the
2486 	 * mbuf chain. Note: if offset is greater than the total mbuf
2487 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2488 	 */
2489 	int32_t tlen = 0;
2490 	struct mbuf *at;
2491 	uint32_t the_sum, retsum;
2492 
2493 	at = m;
2494 	while (at) {
2495 		tlen += SCTP_BUF_LEN(at);
2496 		at = SCTP_BUF_NEXT(at);
2497 	}
2498 	the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset));
2499 	if (pktlen != NULL)
2500 		*pktlen = (tlen - offset);
2501 	retsum = htons(the_sum);
2502 	return (the_sum);
2503 }
2504 
#else

/*
 * Compute the SCTP checksum (Adler-32 or CRC-32c depending on build
 * options) over the mbuf chain starting 'offset' bytes in, and as a
 * side effect report the number of bytes summed through *pktlen.
 */
uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through the chain
	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
	 * has a side bonus as it will calculate the total length of the
	 * mbuf chain. Note: if offset is greater than the total mbuf
	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
	 */
	int32_t tlen = 0;

#ifdef SCTP_USE_ADLER32
	uint32_t base = 1L;	/* Adler-32 initial value */

#else
	uint32_t base = 0xffffffff;	/* CRC-32c initial value (all ones) */

#endif
	struct mbuf *at;

	at = m;
	/* find the correct mbuf and offset into mbuf */
	while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) {
		offset -= SCTP_BUF_LEN(at);	/* update remaining offset
						 * left */
		at = SCTP_BUF_NEXT(at);
	}
	/* sum each mbuf's data, starting 'offset' bytes into the first */
	while (at != NULL) {
		if ((SCTP_BUF_LEN(at) - offset) > 0) {
#ifdef SCTP_USE_ADLER32
			base = update_adler32(base,
			    (unsigned char *)(SCTP_BUF_AT(at, offset)),
			    (unsigned int)(SCTP_BUF_LEN(at) - offset));
#else
			if ((SCTP_BUF_LEN(at) - offset) < 4) {
				/* Use old method if less than 4 bytes */
				base = old_update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			} else {
				base = update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			}
#endif
			tlen += SCTP_BUF_LEN(at) - offset;
			/* we only offset once into the first mbuf */
		}
		if (offset) {
			/*
			 * The search loop above uses '>' so it can exit
			 * with offset still equal to this mbuf's length;
			 * clear it (or carry the remainder) so later
			 * mbufs are summed from byte 0.
			 */
			if (offset < (uint32_t) SCTP_BUF_LEN(at))
				offset = 0;
			else
				offset -= SCTP_BUF_LEN(at);
		}
		at = SCTP_BUF_NEXT(at);
	}
	if (pktlen != NULL) {
		*pktlen = tlen;
	}
#ifdef SCTP_USE_ADLER32
	/* Adler32: return in network byte order */
	base = htonl(base);
#else
	/* CRC-32c: apply the final reflection/complement step */
	base = sctp_csum_finalize(base);
#endif
	return (base);
}
2577 
2578 
2579 #endif
2580 
2581 void
2582 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2583     struct sctp_association *asoc, uint32_t mtu)
2584 {
2585 	/*
2586 	 * Reset the P-MTU size on this association, this involves changing
2587 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2588 	 * allow the DF flag to be cleared.
2589 	 */
2590 	struct sctp_tmit_chunk *chk;
2591 	unsigned int eff_mtu, ovh;
2592 
2593 #ifdef SCTP_PRINT_FOR_B_AND_M
2594 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2595 	    inp, asoc, mtu);
2596 #endif
2597 	asoc->smallest_mtu = mtu;
2598 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2599 		ovh = SCTP_MIN_OVERHEAD;
2600 	} else {
2601 		ovh = SCTP_MIN_V4_OVERHEAD;
2602 	}
2603 	eff_mtu = mtu - ovh;
2604 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2605 
2606 		if (chk->send_size > eff_mtu) {
2607 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2608 		}
2609 	}
2610 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2611 		if (chk->send_size > eff_mtu) {
2612 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2613 		}
2614 	}
2615 }
2616 
2617 
2618 /*
2619  * given an association and starting time of the current RTT period return
2620  * RTO in number of msecs net should point to the current network
2621  */
2622 uint32_t
2623 sctp_calculate_rto(struct sctp_tcb *stcb,
2624     struct sctp_association *asoc,
2625     struct sctp_nets *net,
2626     struct timeval *told,
2627     int safe)
2628 {
2629 	/*-
2630 	 * given an association and the starting time of the current RTT
2631 	 * period (in value1/value2) return RTO in number of msecs.
2632 	 */
2633 	int calc_time = 0;
2634 	int o_calctime;
2635 	uint32_t new_rto = 0;
2636 	int first_measure = 0;
2637 	struct timeval now, then, *old;
2638 
2639 	/* Copy it out for sparc64 */
2640 	if (safe == sctp_align_unsafe_makecopy) {
2641 		old = &then;
2642 		memcpy(&then, told, sizeof(struct timeval));
2643 	} else if (safe == sctp_align_safe_nocopy) {
2644 		old = told;
2645 	} else {
2646 		/* error */
2647 		SCTP_PRINTF("Huh, bad rto calc call\n");
2648 		return (0);
2649 	}
2650 	/************************/
2651 	/* 1. calculate new RTT */
2652 	/************************/
2653 	/* get the current time */
2654 	(void)SCTP_GETTIME_TIMEVAL(&now);
2655 	/* compute the RTT value */
2656 	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
2657 		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
2658 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2659 			calc_time += (((u_long)now.tv_usec -
2660 			    (u_long)old->tv_usec) / 1000);
2661 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2662 			/* Borrow 1,000ms from current calculation */
2663 			calc_time -= 1000;
2664 			/* Add in the slop over */
2665 			calc_time += ((int)now.tv_usec / 1000);
2666 			/* Add in the pre-second ms's */
2667 			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
2668 		}
2669 	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
2670 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2671 			calc_time = ((u_long)now.tv_usec -
2672 			    (u_long)old->tv_usec) / 1000;
2673 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2674 			/* impossible .. garbage in nothing out */
2675 			goto calc_rto;
2676 		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
2677 			/*
2678 			 * We have to have 1 usec :-D this must be the
2679 			 * loopback.
2680 			 */
2681 			calc_time = 1;
2682 		} else {
2683 			/* impossible .. garbage in nothing out */
2684 			goto calc_rto;
2685 		}
2686 	} else {
2687 		/* Clock wrapped? */
2688 		goto calc_rto;
2689 	}
2690 	/***************************/
2691 	/* 2. update RTTVAR & SRTT */
2692 	/***************************/
2693 	o_calctime = calc_time;
2694 	/* this is Van Jacobson's integer version */
2695 	if (net->RTO_measured) {
2696 		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
2697 								 * shift=3 */
2698 		if (sctp_logging_level & SCTP_RTTVAR_LOGGING_ENABLE) {
2699 			rto_logging(net, SCTP_LOG_RTTVAR);
2700 		}
2701 		net->prev_rtt = o_calctime;
2702 		net->lastsa += calc_time;	/* add 7/8th into sa when
2703 						 * shift=3 */
2704 		if (calc_time < 0) {
2705 			calc_time = -calc_time;
2706 		}
2707 		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
2708 									 * VAR shift=2 */
2709 		net->lastsv += calc_time;
2710 		if (net->lastsv == 0) {
2711 			net->lastsv = SCTP_CLOCK_GRANULARITY;
2712 		}
2713 	} else {
2714 		/* First RTO measurment */
2715 		net->RTO_measured = 1;
2716 		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
2717 								 * shift=3 */
2718 		net->lastsv = calc_time;
2719 		if (net->lastsv == 0) {
2720 			net->lastsv = SCTP_CLOCK_GRANULARITY;
2721 		}
2722 		first_measure = 1;
2723 		net->prev_rtt = o_calctime;
2724 		if (sctp_logging_level & SCTP_RTTVAR_LOGGING_ENABLE) {
2725 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2726 		}
2727 	}
2728 calc_rto:
2729 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2730 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2731 	    (stcb->asoc.sat_network_lockout == 0)) {
2732 		stcb->asoc.sat_network = 1;
2733 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2734 		stcb->asoc.sat_network = 0;
2735 		stcb->asoc.sat_network_lockout = 1;
2736 	}
2737 	/* bound it, per C6/C7 in Section 5.3.1 */
2738 	if (new_rto < stcb->asoc.minrto) {
2739 		new_rto = stcb->asoc.minrto;
2740 	}
2741 	if (new_rto > stcb->asoc.maxrto) {
2742 		new_rto = stcb->asoc.maxrto;
2743 	}
2744 	/* we are now returning the RTO */
2745 	return (new_rto);
2746 }
2747 
2748 /*
2749  * return a pointer to a contiguous piece of data from the given mbuf chain
2750  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2751  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2752  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2753  */
2754 caddr_t
2755 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2756 {
2757 	uint32_t count;
2758 	uint8_t *ptr;
2759 
2760 	ptr = in_ptr;
2761 	if ((off < 0) || (len <= 0))
2762 		return (NULL);
2763 
2764 	/* find the desired start location */
2765 	while ((m != NULL) && (off > 0)) {
2766 		if (off < SCTP_BUF_LEN(m))
2767 			break;
2768 		off -= SCTP_BUF_LEN(m);
2769 		m = SCTP_BUF_NEXT(m);
2770 	}
2771 	if (m == NULL)
2772 		return (NULL);
2773 
2774 	/* is the current mbuf large enough (eg. contiguous)? */
2775 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2776 		return (mtod(m, caddr_t)+off);
2777 	} else {
2778 		/* else, it spans more than one mbuf, so save a temp copy... */
2779 		while ((m != NULL) && (len > 0)) {
2780 			count = min(SCTP_BUF_LEN(m) - off, len);
2781 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2782 			len -= count;
2783 			ptr += count;
2784 			off = 0;
2785 			m = SCTP_BUF_NEXT(m);
2786 		}
2787 		if ((m == NULL) && (len > 0))
2788 			return (NULL);
2789 		else
2790 			return ((caddr_t)in_ptr);
2791 	}
2792 }
2793 
2794 
2795 
2796 struct sctp_paramhdr *
2797 sctp_get_next_param(struct mbuf *m,
2798     int offset,
2799     struct sctp_paramhdr *pull,
2800     int pull_limit)
2801 {
2802 	/* This just provides a typed signature to Peter's Pull routine */
2803 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2804 	    (uint8_t *) pull));
2805 }
2806 
2807 
2808 int
2809 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2810 {
2811 	/*
2812 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2813 	 * padlen is > 3 this routine will fail.
2814 	 */
2815 	uint8_t *dp;
2816 	int i;
2817 
2818 	if (padlen > 3) {
2819 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2820 		return (ENOBUFS);
2821 	}
2822 	if (M_TRAILINGSPACE(m)) {
2823 		/*
2824 		 * The easy way. We hope the majority of the time we hit
2825 		 * here :)
2826 		 */
2827 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2828 		SCTP_BUF_LEN(m) += padlen;
2829 	} else {
2830 		/* Hard way we must grow the mbuf */
2831 		struct mbuf *tmp;
2832 
2833 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2834 		if (tmp == NULL) {
2835 			/* Out of space GAK! we are in big trouble. */
2836 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2837 			return (ENOSPC);
2838 		}
2839 		/* setup and insert in middle */
2840 		SCTP_BUF_NEXT(tmp) = SCTP_BUF_NEXT(m);
2841 		SCTP_BUF_LEN(tmp) = padlen;
2842 		SCTP_BUF_NEXT(m) = tmp;
2843 		dp = mtod(tmp, uint8_t *);
2844 	}
2845 	/* zero out the pad */
2846 	for (i = 0; i < padlen; i++) {
2847 		*dp = 0;
2848 		dp++;
2849 	}
2850 	return (0);
2851 }
2852 
2853 int
2854 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2855 {
2856 	/* find the last mbuf in chain and pad it */
2857 	struct mbuf *m_at;
2858 
2859 	m_at = m;
2860 	if (last_mbuf) {
2861 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2862 	} else {
2863 		while (m_at) {
2864 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2865 				return (sctp_add_pad_tombuf(m_at, padval));
2866 			}
2867 			m_at = SCTP_BUF_NEXT(m_at);
2868 		}
2869 	}
2870 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2871 	return (EFAULT);
2872 }
2873 
/* Debug counter: bumped each time an assoc-change event wakes sleepers. */
int sctp_asoc_change_wake = 0;
2875 
/*
 * Queue an SCTP_ASSOC_CHANGE notification (event/error) on the socket's
 * read queue.  For TCP-model or connected UDP-model sockets a COMM_LOST
 * or CANT_STR_ASSOC event also sets so_error and wakes any sleepers so
 * blocked callers see the failure.  so_locked tells us whether the
 * caller already holds the socket lock (only meaningful on platforms
 * that perform the socket-lock dance below).
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * First if we are are going down dump everything we can to the
	 * socket rcv queue.
	 */

	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
	    ) {
		/* If the socket is gone we are out of here */
		return;
	}
	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: drop the TCB lock while acquiring the
		 * socket lock; the refcount keeps the stcb alive in the
		 * window.  Re-check the socket state afterwards.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	/* build the notification mbuf */
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* wrap the mbuf in a read-queue entry and append to the socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* same lock-order dance as above */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3005 
3006 static void
3007 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
3008     struct sockaddr *sa, uint32_t error)
3009 {
3010 	struct mbuf *m_notify;
3011 	struct sctp_paddr_change *spc;
3012 	struct sctp_queued_to_read *control;
3013 
3014 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)))
3015 		/* event not enabled */
3016 		return;
3017 
3018 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
3019 	if (m_notify == NULL)
3020 		return;
3021 	SCTP_BUF_LEN(m_notify) = 0;
3022 	spc = mtod(m_notify, struct sctp_paddr_change *);
3023 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
3024 	spc->spc_flags = 0;
3025 	spc->spc_length = sizeof(struct sctp_paddr_change);
3026 	if (sa->sa_family == AF_INET) {
3027 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
3028 	} else {
3029 		struct sockaddr_in6 *sin6;
3030 
3031 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
3032 
3033 		sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
3034 		if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
3035 			if (sin6->sin6_scope_id == 0) {
3036 				/* recover scope_id for user */
3037 				(void)sa6_recoverscope(sin6);
3038 			} else {
3039 				/* clear embedded scope_id for user */
3040 				in6_clearscope(&sin6->sin6_addr);
3041 			}
3042 		}
3043 	}
3044 	spc->spc_state = state;
3045 	spc->spc_error = error;
3046 	spc->spc_assoc_id = sctp_get_associd(stcb);
3047 
3048 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
3049 	SCTP_BUF_NEXT(m_notify) = NULL;
3050 
3051 	/* append to socket */
3052 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3053 	    0, 0, 0, 0, 0, 0,
3054 	    m_notify);
3055 	if (control == NULL) {
3056 		/* no memory */
3057 		sctp_m_freem(m_notify);
3058 		return;
3059 	}
3060 	control->length = SCTP_BUF_LEN(m_notify);
3061 	control->spec_flags = M_NOTIFICATION;
3062 	/* not that we need this */
3063 	control->tail_mbuf = m_notify;
3064 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3065 	    control,
3066 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3067 }
3068 
3069 
/*
 * Build and queue an SCTP_SEND_FAILED notification for a chunk that was
 * not (or not successfully) delivered.  The chunk's user data is chained
 * behind the event header so the application can recover the message;
 * on return chk->data has been taken over (set to NULL) whether or not
 * the notification could actually be queued.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
		/* event not enabled */
		return;

	/* length reported to the user: event header plus the user data */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* chain the user data behind the event header */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* this also frees the chained user data */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3138 
3139 
3140 static void
3141 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3142     struct sctp_stream_queue_pending *sp, int so_locked
3143 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3144     SCTP_UNUSED
3145 #endif
3146 )
3147 {
3148 	struct mbuf *m_notify;
3149 	struct sctp_send_failed *ssf;
3150 	struct sctp_queued_to_read *control;
3151 	int length;
3152 
3153 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
3154 		/* event not enabled */
3155 		return;
3156 
3157 	length = sizeof(struct sctp_send_failed) + sp->length;
3158 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
3159 	if (m_notify == NULL)
3160 		/* no space left */
3161 		return;
3162 	SCTP_BUF_LEN(m_notify) = 0;
3163 	ssf = mtod(m_notify, struct sctp_send_failed *);
3164 	ssf->ssf_type = SCTP_SEND_FAILED;
3165 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
3166 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3167 	else
3168 		ssf->ssf_flags = SCTP_DATA_SENT;
3169 	ssf->ssf_length = length;
3170 	ssf->ssf_error = error;
3171 	/* not exactly what the user sent in, but should be close :) */
3172 	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3173 	ssf->ssf_info.sinfo_stream = sp->stream;
3174 	ssf->ssf_info.sinfo_ssn = sp->strseq;
3175 	ssf->ssf_info.sinfo_flags = sp->sinfo_flags;
3176 	ssf->ssf_info.sinfo_ppid = sp->ppid;
3177 	ssf->ssf_info.sinfo_context = sp->context;
3178 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3179 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
3180 	SCTP_BUF_NEXT(m_notify) = sp->data;
3181 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3182 
3183 	/* Steal off the mbuf */
3184 	sp->data = NULL;
3185 	/*
3186 	 * For this case, we check the actual socket buffer, since the assoc
3187 	 * is going away we don't want to overfill the socket buffer for a
3188 	 * non-reader
3189 	 */
3190 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3191 		sctp_m_freem(m_notify);
3192 		return;
3193 	}
3194 	/* append to socket */
3195 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3196 	    0, 0, 0, 0, 0, 0,
3197 	    m_notify);
3198 	if (control == NULL) {
3199 		/* no memory */
3200 		sctp_m_freem(m_notify);
3201 		return;
3202 	}
3203 	control->spec_flags = M_NOTIFICATION;
3204 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3205 	    control,
3206 	    &stcb->sctp_socket->so_rcv, 1, so_locked);
3207 }
3208 
3209 
3210 
3211 static void
3212 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3213     uint32_t error)
3214 {
3215 	struct mbuf *m_notify;
3216 	struct sctp_adaptation_event *sai;
3217 	struct sctp_queued_to_read *control;
3218 
3219 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)))
3220 		/* event not enabled */
3221 		return;
3222 
3223 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3224 	if (m_notify == NULL)
3225 		/* no space left */
3226 		return;
3227 	SCTP_BUF_LEN(m_notify) = 0;
3228 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3229 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3230 	sai->sai_flags = 0;
3231 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3232 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3233 	sai->sai_assoc_id = sctp_get_associd(stcb);
3234 
3235 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3236 	SCTP_BUF_NEXT(m_notify) = NULL;
3237 
3238 	/* append to socket */
3239 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3240 	    0, 0, 0, 0, 0, 0,
3241 	    m_notify);
3242 	if (control == NULL) {
3243 		/* no memory */
3244 		sctp_m_freem(m_notify);
3245 		return;
3246 	}
3247 	control->length = SCTP_BUF_LEN(m_notify);
3248 	control->spec_flags = M_NOTIFICATION;
3249 	/* not that we need this */
3250 	control->tail_mbuf = m_notify;
3251 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3252 	    control,
3253 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3254 }
3255 
3256 /* This always must be called with the read-queue LOCKED in the INP */
3257 void
3258 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3259     int nolock, uint32_t val)
3260 {
3261 	struct mbuf *m_notify;
3262 	struct sctp_pdapi_event *pdapi;
3263 	struct sctp_queued_to_read *control;
3264 	struct sockbuf *sb;
3265 
3266 	if ((stcb == NULL) || (stcb->sctp_socket == NULL) ||
3267 	    sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
3268 		/* event not enabled */
3269 		return;
3270 
3271 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3272 	if (m_notify == NULL)
3273 		/* no space left */
3274 		return;
3275 	SCTP_BUF_LEN(m_notify) = 0;
3276 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3277 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3278 	pdapi->pdapi_flags = 0;
3279 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3280 	pdapi->pdapi_indication = error;
3281 	pdapi->pdapi_stream = (val >> 16);
3282 	pdapi->pdapi_seq = (val & 0x0000ffff);
3283 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3284 
3285 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3286 	SCTP_BUF_NEXT(m_notify) = NULL;
3287 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3288 	    0, 0, 0, 0, 0, 0,
3289 	    m_notify);
3290 	if (control == NULL) {
3291 		/* no memory */
3292 		sctp_m_freem(m_notify);
3293 		return;
3294 	}
3295 	control->spec_flags = M_NOTIFICATION;
3296 	control->length = SCTP_BUF_LEN(m_notify);
3297 	/* not that we need this */
3298 	control->tail_mbuf = m_notify;
3299 	control->held_length = 0;
3300 	control->length = 0;
3301 	if (nolock == 0) {
3302 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
3303 	}
3304 	sb = &stcb->sctp_socket->so_rcv;
3305 	if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
3306 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3307 	}
3308 	sctp_sballoc(stcb, sb, m_notify);
3309 	if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
3310 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3311 	}
3312 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3313 	control->end_added = 1;
3314 	if (stcb->asoc.control_pdapi)
3315 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3316 	else {
3317 		/* we really should not see this case */
3318 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3319 	}
3320 	if (nolock == 0) {
3321 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
3322 	}
3323 	if (stcb->sctp_ep && stcb->sctp_socket) {
3324 		/* This should always be the case */
3325 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3326 	}
3327 }
3328 
/*
 * Handle SHUTDOWN completion towards the user: for 1-to-1 style (and
 * TCP-pool) sockets mark the socket as unable to send more data, then
 * queue an SCTP_SHUTDOWN_EVENT if the application enabled
 * SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Pin the TCB with a refcount and drop its lock so the
		 * socket lock can be taken in the correct order.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was closed while the locks were dropped */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3399 
/*
 * Queue an SCTP_STREAM_RESET_EVENT listing the affected streams, or
 * flagging all streams when number_entries == 0.  'flag' carries the
 * SCTP_STRRESET_* direction/failure bits merged into strreset_flags.
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if (stcb == NULL) {
		return;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	/* variable-length event: header plus one uint16_t per stream */
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	if (number_entries == 0) {
		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
	} else {
		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
	}
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		for (i = 0; i < number_entries; i++) {
			/*
			 * NOTE(review): entries are converted with ntohs(),
			 * i.e. callers are assumed to pass the list in
			 * network byte order -- confirm against callers.
			 */
			strreset->strreset_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3467 
3468 
3469 void
3470 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3471     uint32_t error, void *data, int so_locked
3472 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3473     SCTP_UNUSED
3474 #endif
3475 )
3476 {
3477 	if (stcb == NULL) {
3478 		/* unlikely but */
3479 		return;
3480 	}
3481 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3482 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3483 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
3484 	    ) {
3485 		/* No notifications up when we are in a no socket state */
3486 		return;
3487 	}
3488 	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3489 		/* Can't send up to a closed socket any notifications */
3490 		return;
3491 	}
3492 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3493 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3494 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3495 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3496 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3497 			/* Don't report these in front states */
3498 			return;
3499 		}
3500 	}
3501 	switch (notification) {
3502 	case SCTP_NOTIFY_ASSOC_UP:
3503 		if (stcb->asoc.assoc_up_sent == 0) {
3504 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3505 			stcb->asoc.assoc_up_sent = 1;
3506 		}
3507 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3508 			sctp_notify_adaptation_layer(stcb, error);
3509 		}
3510 		break;
3511 	case SCTP_NOTIFY_ASSOC_DOWN:
3512 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3513 		break;
3514 	case SCTP_NOTIFY_INTERFACE_DOWN:
3515 		{
3516 			struct sctp_nets *net;
3517 
3518 			net = (struct sctp_nets *)data;
3519 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3520 			    (struct sockaddr *)&net->ro._l_addr, error);
3521 			break;
3522 		}
3523 	case SCTP_NOTIFY_INTERFACE_UP:
3524 		{
3525 			struct sctp_nets *net;
3526 
3527 			net = (struct sctp_nets *)data;
3528 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3529 			    (struct sockaddr *)&net->ro._l_addr, error);
3530 			break;
3531 		}
3532 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3533 		{
3534 			struct sctp_nets *net;
3535 
3536 			net = (struct sctp_nets *)data;
3537 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3538 			    (struct sockaddr *)&net->ro._l_addr, error);
3539 			break;
3540 		}
3541 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3542 		sctp_notify_send_failed2(stcb, error,
3543 		    (struct sctp_stream_queue_pending *)data, so_locked);
3544 		break;
3545 	case SCTP_NOTIFY_DG_FAIL:
3546 		sctp_notify_send_failed(stcb, error,
3547 		    (struct sctp_tmit_chunk *)data, so_locked);
3548 		break;
3549 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3550 		{
3551 			uint32_t val;
3552 
3553 			val = *((uint32_t *) data);
3554 
3555 			sctp_notify_partial_delivery_indication(stcb, error, 0, val);
3556 		}
3557 		break;
3558 	case SCTP_NOTIFY_STRDATA_ERR:
3559 		break;
3560 	case SCTP_NOTIFY_ASSOC_ABORTED:
3561 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3562 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3563 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3564 		} else {
3565 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3566 		}
3567 		break;
3568 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3569 		break;
3570 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3571 		break;
3572 	case SCTP_NOTIFY_ASSOC_RESTART:
3573 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3574 		break;
3575 	case SCTP_NOTIFY_HB_RESP:
3576 		break;
3577 	case SCTP_NOTIFY_STR_RESET_SEND:
3578 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3579 		break;
3580 	case SCTP_NOTIFY_STR_RESET_RECV:
3581 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3582 		break;
3583 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3584 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3585 		break;
3586 
3587 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3588 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3589 		break;
3590 
3591 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3592 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3593 		    error);
3594 		break;
3595 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3596 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3597 		    error);
3598 		break;
3599 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3600 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3601 		    error);
3602 		break;
3603 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3604 		break;
3605 	case SCTP_NOTIFY_ASCONF_FAILED:
3606 		break;
3607 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3608 		sctp_notify_shutdown_event(stcb);
3609 		break;
3610 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3611 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3612 		    (uint16_t) (uintptr_t) data);
3613 		break;
3614 #if 0
3615 	case SCTP_NOTIFY_AUTH_KEY_CONFLICT:
3616 		sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT,
3617 		    error, (uint16_t) (uintptr_t) data);
3618 		break;
3619 #endif				/* not yet? remove? */
3620 
3621 
3622 	default:
3623 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3624 		    __FUNCTION__, notification, notification);
3625 		break;
3626 	}			/* end switch */
3627 }
3628 
3629 void
3630 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3631 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3632     SCTP_UNUSED
3633 #endif
3634 )
3635 {
3636 	struct sctp_association *asoc;
3637 	struct sctp_stream_out *outs;
3638 	struct sctp_tmit_chunk *chk;
3639 	struct sctp_stream_queue_pending *sp;
3640 	int i;
3641 
3642 	asoc = &stcb->asoc;
3643 
3644 	if (stcb == NULL) {
3645 		return;
3646 	}
3647 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3648 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3649 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3650 		return;
3651 	}
3652 	/* now through all the gunk freeing chunks */
3653 	if (holds_lock == 0) {
3654 		SCTP_TCB_SEND_LOCK(stcb);
3655 	}
3656 	/* sent queue SHOULD be empty */
3657 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3658 		chk = TAILQ_FIRST(&asoc->sent_queue);
3659 		while (chk) {
3660 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3661 			asoc->sent_queue_cnt--;
3662 			if (chk->data) {
3663 				/*
3664 				 * trim off the sctp chunk header(it should
3665 				 * be there)
3666 				 */
3667 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3668 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3669 					sctp_mbuf_crush(chk->data);
3670 					chk->send_size -= sizeof(struct sctp_data_chunk);
3671 				}
3672 			}
3673 			sctp_free_bufspace(stcb, asoc, chk, 1);
3674 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3675 			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3676 			if (chk->data) {
3677 				sctp_m_freem(chk->data);
3678 				chk->data = NULL;
3679 			}
3680 			sctp_free_a_chunk(stcb, chk);
3681 			/* sa_ignore FREED_MEMORY */
3682 			chk = TAILQ_FIRST(&asoc->sent_queue);
3683 		}
3684 	}
3685 	/* pending send queue SHOULD be empty */
3686 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3687 		chk = TAILQ_FIRST(&asoc->send_queue);
3688 		while (chk) {
3689 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3690 			asoc->send_queue_cnt--;
3691 			if (chk->data) {
3692 				/*
3693 				 * trim off the sctp chunk header(it should
3694 				 * be there)
3695 				 */
3696 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3697 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3698 					sctp_mbuf_crush(chk->data);
3699 					chk->send_size -= sizeof(struct sctp_data_chunk);
3700 				}
3701 			}
3702 			sctp_free_bufspace(stcb, asoc, chk, 1);
3703 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3704 			if (chk->data) {
3705 				sctp_m_freem(chk->data);
3706 				chk->data = NULL;
3707 			}
3708 			sctp_free_a_chunk(stcb, chk);
3709 			/* sa_ignore FREED_MEMORY */
3710 			chk = TAILQ_FIRST(&asoc->send_queue);
3711 		}
3712 	}
3713 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3714 		/* For each stream */
3715 		outs = &stcb->asoc.strmout[i];
3716 		/* clean up any sends there */
3717 		stcb->asoc.locked_on_sending = NULL;
3718 		sp = TAILQ_FIRST(&outs->outqueue);
3719 		while (sp) {
3720 			stcb->asoc.stream_queue_cnt--;
3721 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3722 			sctp_free_spbufspace(stcb, asoc, sp);
3723 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3724 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3725 			if (sp->data) {
3726 				sctp_m_freem(sp->data);
3727 				sp->data = NULL;
3728 			}
3729 			if (sp->net)
3730 				sctp_free_remote_addr(sp->net);
3731 			sp->net = NULL;
3732 			/* Free the chunk */
3733 			sctp_free_a_strmoq(stcb, sp);
3734 			/* sa_ignore FREED_MEMORY */
3735 			sp = TAILQ_FIRST(&outs->outqueue);
3736 		}
3737 	}
3738 
3739 	if (holds_lock == 0) {
3740 		SCTP_TCB_SEND_UNLOCK(stcb);
3741 	}
3742 }
3743 
3744 void
3745 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3746 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3747     SCTP_UNUSED
3748 #endif
3749 )
3750 {
3751 
3752 	if (stcb == NULL) {
3753 		return;
3754 	}
3755 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3756 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3757 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3758 		return;
3759 	}
3760 	/* Tell them we lost the asoc */
3761 	sctp_report_all_outbound(stcb, 1, so_locked);
3762 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3763 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3764 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3765 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3766 	}
3767 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3768 }
3769 
/*
 * Abort an association in response to an inbound packet: notify the
 * ULP (when a TCB exists), send an ABORT back using the peer's
 * verification tag, and tear the TCB down -- or, with no TCB, finish
 * freeing a half-dead inp.  'm'/'iphlen'/'sh' describe the packet that
 * triggered the abort.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Pin the TCB with a refcount while its lock is dropped so
		 * the socket lock can be taken in the correct order.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last assoc is gone; release the inp now */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3815 
3816 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the association's inbound and outbound TSN
 * tracking logs.  The body is compiled away unless NOSIY_PRINTS is
 * defined (NOTE(review): looks like a typo of "NOISY_PRINTS", but the
 * macro name is the build knob that enables these prints, so renaming
 * it would change behavior -- left as is).
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* when wrapped, entries from tsn_in_at to the end are the oldest */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3877 
3878 #endif
3879 
/*
 * Locally abort an existing association: notify the ULP (unless the
 * socket is already gone), send an ABORT chunk with 'op_err' to the
 * peer, update the stats, and free the association.  With stcb == NULL
 * only a half-dead inp may be finished off.  so_locked indicates
 * whether the caller already holds the socket lock (on platforms that
 * use it).
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last assoc gone; release the inp now */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* XXX: 'vtag' is set but only used implicitly below, if at all */
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Pin the TCB with a refcount while its lock is dropped so the
	 * socket lock can be taken in the correct order.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3942 
3943 void
3944 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3945     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id)
3946 {
3947 	struct sctp_chunkhdr *ch, chunk_buf;
3948 	unsigned int chk_length;
3949 
3950 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3951 	/* Generate a TO address for future reference */
3952 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3953 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3954 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3955 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3956 		}
3957 	}
3958 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3959 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3960 	while (ch != NULL) {
3961 		chk_length = ntohs(ch->chunk_length);
3962 		if (chk_length < sizeof(*ch)) {
3963 			/* break to abort land */
3964 			break;
3965 		}
3966 		switch (ch->chunk_type) {
3967 		case SCTP_PACKET_DROPPED:
3968 			/* we don't respond to pkt-dropped */
3969 			return;
3970 		case SCTP_ABORT_ASSOCIATION:
3971 			/* we don't respond with an ABORT to an ABORT */
3972 			return;
3973 		case SCTP_SHUTDOWN_COMPLETE:
3974 			/*
3975 			 * we ignore it since we are not waiting for it and
3976 			 * peer is gone
3977 			 */
3978 			return;
3979 		case SCTP_SHUTDOWN_ACK:
3980 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
3981 			return;
3982 		default:
3983 			break;
3984 		}
3985 		offset += SCTP_SIZE32(chk_length);
3986 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3987 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3988 	}
3989 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id);
3990 }
3991 
3992 /*
3993  * check the inbound datagram to make sure there is not an abort inside it,
3994  * if there is return 1, else return 0.
3995  */
3996 int
3997 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3998 {
3999 	struct sctp_chunkhdr *ch;
4000 	struct sctp_init_chunk *init_chk, chunk_buf;
4001 	int offset;
4002 	unsigned int chk_length;
4003 
4004 	offset = iphlen + sizeof(struct sctphdr);
4005 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4006 	    (uint8_t *) & chunk_buf);
4007 	while (ch != NULL) {
4008 		chk_length = ntohs(ch->chunk_length);
4009 		if (chk_length < sizeof(*ch)) {
4010 			/* packet is probably corrupt */
4011 			break;
4012 		}
4013 		/* we seem to be ok, is it an abort? */
4014 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4015 			/* yep, tell them */
4016 			return (1);
4017 		}
4018 		if (ch->chunk_type == SCTP_INITIATION) {
4019 			/* need to update the Vtag */
4020 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4021 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4022 			if (init_chk != NULL) {
4023 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4024 			}
4025 		}
4026 		/* Nope, move to the next chunk */
4027 		offset += SCTP_SIZE32(chk_length);
4028 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4029 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4030 	}
4031 	return (0);
4032 }
4033 
4034 /*
4035  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4036  * set (i.e. it's 0) so, create this function to compare link local scopes
4037  */
4038 uint32_t
4039 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4040 {
4041 	struct sockaddr_in6 a, b;
4042 
4043 	/* save copies */
4044 	a = *addr1;
4045 	b = *addr2;
4046 
4047 	if (a.sin6_scope_id == 0)
4048 		if (sa6_recoverscope(&a)) {
4049 			/* can't get scope, so can't match */
4050 			return (0);
4051 		}
4052 	if (b.sin6_scope_id == 0)
4053 		if (sa6_recoverscope(&b)) {
4054 			/* can't get scope, so can't match */
4055 			return (0);
4056 		}
4057 	if (a.sin6_scope_id != b.sin6_scope_id)
4058 		return (0);
4059 
4060 	return (1);
4061 }
4062 
4063 /*
4064  * returns a sockaddr_in6 with embedded scope recovered and removed
4065  */
4066 struct sockaddr_in6 *
4067 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4068 {
4069 	/* check and strip embedded scope junk */
4070 	if (addr->sin6_family == AF_INET6) {
4071 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4072 			if (addr->sin6_scope_id == 0) {
4073 				*store = *addr;
4074 				if (!sa6_recoverscope(store)) {
4075 					/* use the recovered scope */
4076 					addr = store;
4077 				}
4078 			} else {
4079 				/* else, return the original "to" addr */
4080 				in6_clearscope(&addr->sin6_addr);
4081 			}
4082 		}
4083 	}
4084 	return (addr);
4085 }
4086 
4087 /*
4088  * are the two addresses the same?  currently a "scopeless" check returns: 1
4089  * if same, 0 if not
4090  */
4091 int
4092 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4093 {
4094 
4095 	/* must be valid */
4096 	if (sa1 == NULL || sa2 == NULL)
4097 		return (0);
4098 
4099 	/* must be the same family */
4100 	if (sa1->sa_family != sa2->sa_family)
4101 		return (0);
4102 
4103 	if (sa1->sa_family == AF_INET6) {
4104 		/* IPv6 addresses */
4105 		struct sockaddr_in6 *sin6_1, *sin6_2;
4106 
4107 		sin6_1 = (struct sockaddr_in6 *)sa1;
4108 		sin6_2 = (struct sockaddr_in6 *)sa2;
4109 		return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
4110 		    &sin6_2->sin6_addr));
4111 	} else if (sa1->sa_family == AF_INET) {
4112 		/* IPv4 addresses */
4113 		struct sockaddr_in *sin_1, *sin_2;
4114 
4115 		sin_1 = (struct sockaddr_in *)sa1;
4116 		sin_2 = (struct sockaddr_in *)sa2;
4117 		return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4118 	} else {
4119 		/* we don't do these... */
4120 		return (0);
4121 	}
4122 }
4123 
4124 void
4125 sctp_print_address(struct sockaddr *sa)
4126 {
4127 	char ip6buf[INET6_ADDRSTRLEN];
4128 
4129 	ip6buf[0] = 0;
4130 	if (sa->sa_family == AF_INET6) {
4131 		struct sockaddr_in6 *sin6;
4132 
4133 		sin6 = (struct sockaddr_in6 *)sa;
4134 		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4135 		    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4136 		    ntohs(sin6->sin6_port),
4137 		    sin6->sin6_scope_id);
4138 	} else if (sa->sa_family == AF_INET) {
4139 		struct sockaddr_in *sin;
4140 		unsigned char *p;
4141 
4142 		sin = (struct sockaddr_in *)sa;
4143 		p = (unsigned char *)&sin->sin_addr;
4144 		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4145 		    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4146 	} else {
4147 		SCTP_PRINTF("?\n");
4148 	}
4149 }
4150 
4151 void
4152 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4153 {
4154 	if (iph->ip_v == IPVERSION) {
4155 		struct sockaddr_in lsa, fsa;
4156 
4157 		bzero(&lsa, sizeof(lsa));
4158 		lsa.sin_len = sizeof(lsa);
4159 		lsa.sin_family = AF_INET;
4160 		lsa.sin_addr = iph->ip_src;
4161 		lsa.sin_port = sh->src_port;
4162 		bzero(&fsa, sizeof(fsa));
4163 		fsa.sin_len = sizeof(fsa);
4164 		fsa.sin_family = AF_INET;
4165 		fsa.sin_addr = iph->ip_dst;
4166 		fsa.sin_port = sh->dest_port;
4167 		SCTP_PRINTF("src: ");
4168 		sctp_print_address((struct sockaddr *)&lsa);
4169 		SCTP_PRINTF("dest: ");
4170 		sctp_print_address((struct sockaddr *)&fsa);
4171 	} else if (iph->ip_v == (IPV6_VERSION >> 4)) {
4172 		struct ip6_hdr *ip6;
4173 		struct sockaddr_in6 lsa6, fsa6;
4174 
4175 		ip6 = (struct ip6_hdr *)iph;
4176 		bzero(&lsa6, sizeof(lsa6));
4177 		lsa6.sin6_len = sizeof(lsa6);
4178 		lsa6.sin6_family = AF_INET6;
4179 		lsa6.sin6_addr = ip6->ip6_src;
4180 		lsa6.sin6_port = sh->src_port;
4181 		bzero(&fsa6, sizeof(fsa6));
4182 		fsa6.sin6_len = sizeof(fsa6);
4183 		fsa6.sin6_family = AF_INET6;
4184 		fsa6.sin6_addr = ip6->ip6_dst;
4185 		fsa6.sin6_port = sh->dest_port;
4186 		SCTP_PRINTF("src: ");
4187 		sctp_print_address((struct sockaddr *)&lsa6);
4188 		SCTP_PRINTF("dest: ");
4189 		sctp_print_address((struct sockaddr *)&fsa6);
4190 	}
4191 }
4192 
/*
 * Move every queued-to-read control belonging to stcb from old_inp's read
 * queue over to new_inp's (used on peeloff/accept).  Done in two phases
 * via tmp_queue so that only one INP read lock is held at a time; socket
 * buffer accounting (sb_cc) is moved from old_so to new_so as we go.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* keep concurrent readers out of the old socket buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for out target stcb */
	while (control) {
		/* grab the next link before we unlink control */
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit each data mbuf from the old socket buffer */
			while (m) {
				if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit each data mbuf to the new socket buffer */
		while (m) {
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4274 
4275 
4276 void
4277 sctp_add_to_readq(struct sctp_inpcb *inp,
4278     struct sctp_tcb *stcb,
4279     struct sctp_queued_to_read *control,
4280     struct sockbuf *sb,
4281     int end,
4282     int so_locked
4283 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4284     SCTP_UNUSED
4285 #endif
4286 )
4287 {
4288 	/*
4289 	 * Here we must place the control on the end of the socket read
4290 	 * queue AND increment sb_cc so that select will work properly on
4291 	 * read.
4292 	 */
4293 	struct mbuf *m, *prev = NULL;
4294 
4295 	if (inp == NULL) {
4296 		/* Gak, TSNH!! */
4297 #ifdef INVARIANTS
4298 		panic("Gak, inp NULL on add_to_readq");
4299 #endif
4300 		return;
4301 	}
4302 	SCTP_INP_READ_LOCK(inp);
4303 	if (!(control->spec_flags & M_NOTIFICATION)) {
4304 		atomic_add_int(&inp->total_recvs, 1);
4305 		if (!control->do_not_ref_stcb) {
4306 			atomic_add_int(&stcb->total_recvs, 1);
4307 		}
4308 	}
4309 	m = control->data;
4310 	control->held_length = 0;
4311 	control->length = 0;
4312 	while (m) {
4313 		if (SCTP_BUF_LEN(m) == 0) {
4314 			/* Skip mbufs with NO length */
4315 			if (prev == NULL) {
4316 				/* First one */
4317 				control->data = sctp_m_free(m);
4318 				m = control->data;
4319 			} else {
4320 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4321 				m = SCTP_BUF_NEXT(prev);
4322 			}
4323 			if (m == NULL) {
4324 				control->tail_mbuf = prev;;
4325 			}
4326 			continue;
4327 		}
4328 		prev = m;
4329 		if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
4330 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4331 		}
4332 		sctp_sballoc(stcb, sb, m);
4333 		if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
4334 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4335 		}
4336 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4337 		m = SCTP_BUF_NEXT(m);
4338 	}
4339 	if (prev != NULL) {
4340 		control->tail_mbuf = prev;
4341 	} else {
4342 		/* Everything got collapsed out?? */
4343 		return;
4344 	}
4345 	if (end) {
4346 		control->end_added = 1;
4347 	}
4348 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4349 	SCTP_INP_READ_UNLOCK(inp);
4350 	if (inp && inp->sctp_socket) {
4351 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4352 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4353 		} else {
4354 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4355 			struct socket *so;
4356 
4357 			so = SCTP_INP_SO(inp);
4358 			if (!so_locked) {
4359 				atomic_add_int(&stcb->asoc.refcnt, 1);
4360 				SCTP_TCB_UNLOCK(stcb);
4361 				SCTP_SOCKET_LOCK(so, 1);
4362 				SCTP_TCB_LOCK(stcb);
4363 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4364 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4365 					SCTP_SOCKET_UNLOCK(so, 1);
4366 					return;
4367 				}
4368 			}
4369 #endif
4370 			sctp_sorwakeup(inp, inp->sctp_socket);
4371 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4372 			if (!so_locked) {
4373 				SCTP_SOCKET_UNLOCK(so, 1);
4374 			}
4375 #endif
4376 		}
4377 	}
4378 }
4379 
4380 
/*
 * Append mbuf chain 'm' to an existing read-queue entry 'control' (used
 * for partial-delivery API and reassembly appends).  Zero-length mbufs
 * are pruned; when 'sb' is non-NULL each surviving mbuf is charged to the
 * socket buffer.  'end' marks the message complete; ctls_cumack updates
 * the entry's pd-api highest-TSN bookkeeping.  Returns 0 on success, -1
 * if there is nothing valid to append to.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common failure exit: drop the read lock if we took it */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			/* charge this mbuf to the socket buffer */
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			/* the pd-api delivery for this assoc is finished */
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			/*
			 * Take the socket lock in the required order,
			 * holding a ref so the assoc can't be freed while
			 * the TCB lock is dropped.
			 */
			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4522 
4523 
4524 
4525 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4526  *************ALTERNATE ROUTING CODE
4527  */
4528 
4529 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4530  *************ALTERNATE ROUTING CODE
4531  */
4532 
4533 struct mbuf *
4534 sctp_generate_invmanparam(int err)
4535 {
4536 	/* Return a MBUF with a invalid mandatory parameter */
4537 	struct mbuf *m;
4538 
4539 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4540 	if (m) {
4541 		struct sctp_paramhdr *ph;
4542 
4543 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4544 		ph = mtod(m, struct sctp_paramhdr *);
4545 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4546 		ph->param_type = htons(err);
4547 	}
4548 	return (m);
4549 }
4550 
4551 #ifdef SCTP_MBCNT_LOGGING
4552 void
4553 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4554     struct sctp_tmit_chunk *tp1, int chk_cnt)
4555 {
4556 	if (tp1->data == NULL) {
4557 		return;
4558 	}
4559 	asoc->chunks_on_out_queue -= chk_cnt;
4560 	if (sctp_logging_level & SCTP_MBCNT_LOGGING_ENABLE) {
4561 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4562 		    asoc->total_output_queue_size,
4563 		    tp1->book_size,
4564 		    0,
4565 		    tp1->mbcnt);
4566 	}
4567 	if (asoc->total_output_queue_size >= tp1->book_size) {
4568 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4569 	} else {
4570 		asoc->total_output_queue_size = 0;
4571 	}
4572 
4573 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4574 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4575 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4576 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4577 		} else {
4578 			stcb->sctp_socket->so_snd.sb_cc = 0;
4579 
4580 		}
4581 	}
4582 }
4583 
4584 #endif
4585 
/*
 * Release a PR-SCTP (timed-reliability) chunk tp1 from 'queue': mark it
 * FORWARD-TSN-SKIP, free its data, notify the ULP of the failed datagram,
 * and continue through any following fragments of the same message.  If
 * the message was split across the send and sent queues, recurse into the
 * send queue to mark the rest.  Returns the total book_size released.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    int reason, struct sctpchunk_listhead *queue, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	int ret_sz = 0;
	int notdone;
	uint8_t foundeom = 0;

	do {
		ret_sz += tp1->book_size;
		/* the FWD-TSN machinery will skip over this chunk */
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if (tp1->data) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, SCTP_SO_NOT_LOCKED);
			sctp_m_freem(tp1->data);
			tp1->data = NULL;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Wakeup needs the socket lock; take it in the
			 * required order with a temporary assoc ref.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			if (!so_locked) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					/*
					 * assoc was freed while we were
					 * unlocked
					 */
					SCTP_SOCKET_UNLOCK(so, 1);
					return (ret_sz);
				}
			}
#endif
			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
		if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
			stcb->asoc.sent_queue_cnt_removeable--;
		}
		if (queue == &stcb->asoc.send_queue) {
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.sent_queue_cnt++;
		}
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
		/*
		 * recurse through the send_queue too, starting at the
		 * beginning.
		 */
		if (tp1) {
			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
			    &stcb->asoc.send_queue, so_locked);
		} else {
			SCTP_PRINTF("hmm, nothing on the send queue and no EOM?\n");
		}
	}
	return (ret_sz);
}
4682 
4683 /*
4684  * checks to see if the given address, sa, is one that is currently known by
4685  * the kernel note: can't distinguish the same address on multiple interfaces
4686  * and doesn't handle multiple addresses with different zone/scope id's note:
4687  * ifa_ifwithaddr() compares the entire sockaddr struct
4688  */
4689 struct sctp_ifa *
4690 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4691     int holds_lock)
4692 {
4693 	struct sctp_laddr *laddr;
4694 
4695 	if (holds_lock == 0) {
4696 		SCTP_INP_RLOCK(inp);
4697 	}
4698 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4699 		if (laddr->ifa == NULL)
4700 			continue;
4701 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4702 			continue;
4703 		if (addr->sa_family == AF_INET) {
4704 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4705 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4706 				/* found him. */
4707 				if (holds_lock == 0) {
4708 					SCTP_INP_RUNLOCK(inp);
4709 				}
4710 				return (laddr->ifa);
4711 				break;
4712 			}
4713 		} else if (addr->sa_family == AF_INET6) {
4714 			if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr,
4715 			    &laddr->ifa->address.sin6.sin6_addr)) {
4716 				/* found him. */
4717 				if (holds_lock == 0) {
4718 					SCTP_INP_RUNLOCK(inp);
4719 				}
4720 				return (laddr->ifa);
4721 				break;
4722 			}
4723 		}
4724 	}
4725 	if (holds_lock == 0) {
4726 		SCTP_INP_RUNLOCK(inp);
4727 	}
4728 	return (NULL);
4729 }
4730 
4731 uint32_t
4732 sctp_get_ifa_hash_val(struct sockaddr *addr)
4733 {
4734 	if (addr->sa_family == AF_INET) {
4735 		struct sockaddr_in *sin;
4736 
4737 		sin = (struct sockaddr_in *)addr;
4738 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4739 	} else if (addr->sa_family == AF_INET6) {
4740 		struct sockaddr_in6 *sin6;
4741 		uint32_t hash_of_addr;
4742 
4743 		sin6 = (struct sockaddr_in6 *)addr;
4744 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4745 		    sin6->sin6_addr.s6_addr32[1] +
4746 		    sin6->sin6_addr.s6_addr32[2] +
4747 		    sin6->sin6_addr.s6_addr32[3]);
4748 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4749 		return (hash_of_addr);
4750 	}
4751 	return (0);
4752 }
4753 
4754 struct sctp_ifa *
4755 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4756 {
4757 	struct sctp_ifa *sctp_ifap;
4758 	struct sctp_vrf *vrf;
4759 	struct sctp_ifalist *hash_head;
4760 	uint32_t hash_of_addr;
4761 
4762 	if (holds_lock == 0)
4763 		SCTP_IPI_ADDR_LOCK();
4764 
4765 	vrf = sctp_find_vrf(vrf_id);
4766 	if (vrf == NULL) {
4767 		if (holds_lock == 0)
4768 			SCTP_IPI_ADDR_UNLOCK();
4769 		return (NULL);
4770 	}
4771 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4772 
4773 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4774 	if (hash_head == NULL) {
4775 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4776 		    (u_int)hash_of_addr, (u_int)vrf->vrf_addr_hashmark,
4777 		    (u_int)(hash_of_addr & vrf->vrf_addr_hashmark));
4778 		sctp_print_address(addr);
4779 		SCTP_PRINTF("No such bucket for address\n");
4780 		if (holds_lock == 0)
4781 			SCTP_IPI_ADDR_UNLOCK();
4782 
4783 		return (NULL);
4784 	}
4785 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4786 		if (sctp_ifap == NULL) {
4787 			panic("Huh LIST_FOREACH corrupt");
4788 		}
4789 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4790 			continue;
4791 		if (addr->sa_family == AF_INET) {
4792 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4793 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4794 				/* found him. */
4795 				if (holds_lock == 0)
4796 					SCTP_IPI_ADDR_UNLOCK();
4797 				return (sctp_ifap);
4798 				break;
4799 			}
4800 		} else if (addr->sa_family == AF_INET6) {
4801 			if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr,
4802 			    &sctp_ifap->address.sin6.sin6_addr)) {
4803 				/* found him. */
4804 				if (holds_lock == 0)
4805 					SCTP_IPI_ADDR_UNLOCK();
4806 				return (sctp_ifap);
4807 				break;
4808 			}
4809 		}
4810 	}
4811 	if (holds_lock == 0)
4812 		SCTP_IPI_ADDR_UNLOCK();
4813 	return (NULL);
4814 }
4815 
/*
 * Called after the user has consumed *freed_so_far bytes from the read
 * queue.  If the receive window grew by at least rwnd_req, send a window
 * update SACK immediately; otherwise just accumulate the freed count.
 * 'hold_rlock' says the caller holds the INP read lock, which must be
 * dropped around the SACK send and re-taken before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* pin the assoc so it cannot be freed under us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have we freed enough to look? */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window grew enough: send an immediate update SACK */
		if (hold_rlock) {
			/* drop the read lock before taking the TCB lock */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb);
		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* re-take the read lock if we dropped it for the caller */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
4896 
4897 int
4898 sctp_sorecvmsg(struct socket *so,
4899     struct uio *uio,
4900     struct mbuf **mp,
4901     struct sockaddr *from,
4902     int fromlen,
4903     int *msg_flags,
4904     struct sctp_sndrcvinfo *sinfo,
4905     int filling_sinfo)
4906 {
4907 	/*
4908 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
4909 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
4910 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
4911 	 * On the way out we may send out any combination of:
4912 	 * MSG_NOTIFICATION MSG_EOR
4913 	 *
4914 	 */
4915 	struct sctp_inpcb *inp = NULL;
4916 	int my_len = 0;
4917 	int cp_len = 0, error = 0;
4918 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
4919 	struct mbuf *m = NULL, *embuf = NULL;
4920 	struct sctp_tcb *stcb = NULL;
4921 	int wakeup_read_socket = 0;
4922 	int freecnt_applied = 0;
4923 	int out_flags = 0, in_flags = 0;
4924 	int block_allowed = 1;
4925 	uint32_t freed_so_far = 0;
4926 	int copied_so_far = 0;
4927 	int in_eeor_mode = 0;
4928 	int no_rcv_needed = 0;
4929 	uint32_t rwnd_req = 0;
4930 	int hold_sblock = 0;
4931 	int hold_rlock = 0;
4932 	int slen = 0;
4933 	uint32_t held_length = 0;
4934 	int sockbuf_lock = 0;
4935 
4936 	if (uio == NULL) {
4937 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
4938 		return (EINVAL);
4939 	}
4940 	if (msg_flags) {
4941 		in_flags = *msg_flags;
4942 		if (in_flags & MSG_PEEK)
4943 			SCTP_STAT_INCR(sctps_read_peeks);
4944 	} else {
4945 		in_flags = 0;
4946 	}
4947 	slen = uio->uio_resid;
4948 
4949 	/* Pull in and set up our int flags */
4950 	if (in_flags & MSG_OOB) {
4951 		/* Out of band's NOT supported */
4952 		return (EOPNOTSUPP);
4953 	}
4954 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
4955 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
4956 		return (EINVAL);
4957 	}
4958 	if ((in_flags & (MSG_DONTWAIT
4959 	    | MSG_NBIO
4960 	    )) ||
4961 	    SCTP_SO_IS_NBIO(so)) {
4962 		block_allowed = 0;
4963 	}
4964 	/* setup the endpoint */
4965 	inp = (struct sctp_inpcb *)so->so_pcb;
4966 	if (inp == NULL) {
4967 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
4968 		return (EFAULT);
4969 	}
4970 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
4971 	/* Must be at least a MTU's worth */
4972 	if (rwnd_req < SCTP_MIN_RWND)
4973 		rwnd_req = SCTP_MIN_RWND;
4974 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
4975 	if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
4976 		sctp_misc_ints(SCTP_SORECV_ENTER,
4977 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
4978 	}
4979 	if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
4980 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
4981 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
4982 	}
4983 	error = sblock(&so->so_rcv, (block_allowed ? M_WAITOK : 0));
4984 	sockbuf_lock = 1;
4985 	if (error) {
4986 		goto release_unlocked;
4987 	}
4988 restart:
4989 
4990 
4991 restart_nosblocks:
4992 	if (hold_sblock == 0) {
4993 		SOCKBUF_LOCK(&so->so_rcv);
4994 		hold_sblock = 1;
4995 	}
4996 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
4997 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
4998 		goto out;
4999 	}
5000 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5001 		if (so->so_error) {
5002 			error = so->so_error;
5003 			if ((in_flags & MSG_PEEK) == 0)
5004 				so->so_error = 0;
5005 		} else {
5006 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5007 			error = ENOTCONN;
5008 		}
5009 		goto out;
5010 	}
5011 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5012 		/* we need to wait for data */
5013 		if ((so->so_rcv.sb_cc == 0) &&
5014 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5015 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5016 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5017 				/*
5018 				 * For active open side clear flags for
5019 				 * re-use passive open is blocked by
5020 				 * connect.
5021 				 */
5022 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5023 					/*
5024 					 * You were aborted, passive side
5025 					 * always hits here
5026 					 */
5027 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5028 					error = ECONNRESET;
5029 					/*
5030 					 * You get this once if you are
5031 					 * active open side
5032 					 */
5033 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5034 						/*
5035 						 * Remove flag if on the
5036 						 * active open side
5037 						 */
5038 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5039 					}
5040 				}
5041 				so->so_state &= ~(SS_ISCONNECTING |
5042 				    SS_ISDISCONNECTING |
5043 				    SS_ISCONFIRMING |
5044 				    SS_ISCONNECTED);
5045 				if (error == 0) {
5046 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5047 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5048 						error = ENOTCONN;
5049 					} else {
5050 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5051 					}
5052 				}
5053 				goto out;
5054 			}
5055 		}
5056 		error = sbwait(&so->so_rcv);
5057 		if (error) {
5058 			goto out;
5059 		}
5060 		held_length = 0;
5061 		goto restart_nosblocks;
5062 	} else if (so->so_rcv.sb_cc == 0) {
5063 		if (so->so_error) {
5064 			error = so->so_error;
5065 			if ((in_flags & MSG_PEEK) == 0)
5066 				so->so_error = 0;
5067 		} else {
5068 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5069 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5070 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5071 					/*
5072 					 * For active open side clear flags
5073 					 * for re-use passive open is
5074 					 * blocked by connect.
5075 					 */
5076 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5077 						/*
5078 						 * You were aborted, passive
5079 						 * side always hits here
5080 						 */
5081 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5082 						error = ECONNRESET;
5083 						/*
5084 						 * You get this once if you
5085 						 * are active open side
5086 						 */
5087 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5088 							/*
5089 							 * Remove flag if on
5090 							 * the active open
5091 							 * side
5092 							 */
5093 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5094 						}
5095 					}
5096 					so->so_state &= ~(SS_ISCONNECTING |
5097 					    SS_ISDISCONNECTING |
5098 					    SS_ISCONFIRMING |
5099 					    SS_ISCONNECTED);
5100 					if (error == 0) {
5101 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5102 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5103 							error = ENOTCONN;
5104 						} else {
5105 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5106 						}
5107 					}
5108 					goto out;
5109 				}
5110 			}
5111 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5112 			error = EWOULDBLOCK;
5113 		}
5114 		goto out;
5115 	}
5116 	if (hold_sblock == 1) {
5117 		SOCKBUF_UNLOCK(&so->so_rcv);
5118 		hold_sblock = 0;
5119 	}
5120 	/* we possibly have data we can read */
5121 	/* sa_ignore FREED_MEMORY */
5122 	control = TAILQ_FIRST(&inp->read_queue);
5123 	if (control == NULL) {
5124 		/*
5125 		 * This could be happening since the appender did the
5126 		 * increment but as not yet did the tailq insert onto the
5127 		 * read_queue
5128 		 */
5129 		if (hold_rlock == 0) {
5130 			SCTP_INP_READ_LOCK(inp);
5131 			hold_rlock = 1;
5132 		}
5133 		control = TAILQ_FIRST(&inp->read_queue);
5134 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5135 #ifdef INVARIANTS
5136 			panic("Huh, its non zero and nothing on control?");
5137 #endif
5138 			so->so_rcv.sb_cc = 0;
5139 		}
5140 		SCTP_INP_READ_UNLOCK(inp);
5141 		hold_rlock = 0;
5142 		goto restart;
5143 	}
5144 	if ((control->length == 0) &&
5145 	    (control->do_not_ref_stcb)) {
5146 		/*
5147 		 * Clean up code for freeing assoc that left behind a
5148 		 * pdapi.. maybe a peer in EEOR that just closed after
5149 		 * sending and never indicated a EOR.
5150 		 */
5151 		if (hold_rlock == 0) {
5152 			hold_rlock = 1;
5153 			SCTP_INP_READ_LOCK(inp);
5154 		}
5155 		control->held_length = 0;
5156 		if (control->data) {
5157 			/* Hmm there is data here .. fix */
5158 			struct mbuf *m_tmp;
5159 			int cnt = 0;
5160 
5161 			m_tmp = control->data;
5162 			while (m_tmp) {
5163 				cnt += SCTP_BUF_LEN(m_tmp);
5164 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5165 					control->tail_mbuf = m_tmp;
5166 					control->end_added = 1;
5167 				}
5168 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5169 			}
5170 			control->length = cnt;
5171 		} else {
5172 			/* remove it */
5173 			TAILQ_REMOVE(&inp->read_queue, control, next);
5174 			/* Add back any hiddend data */
5175 			sctp_free_remote_addr(control->whoFrom);
5176 			sctp_free_a_readq(stcb, control);
5177 		}
5178 		if (hold_rlock) {
5179 			hold_rlock = 0;
5180 			SCTP_INP_READ_UNLOCK(inp);
5181 		}
5182 		goto restart;
5183 	}
5184 	if (control->length == 0) {
5185 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5186 		    (filling_sinfo)) {
5187 			/* find a more suitable one then this */
5188 			ctl = TAILQ_NEXT(control, next);
5189 			while (ctl) {
5190 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5191 				    (ctl->some_taken ||
5192 				    (ctl->spec_flags & M_NOTIFICATION) ||
5193 				    ((ctl->do_not_ref_stcb == 0) &&
5194 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5195 				    ) {
5196 					/*-
5197 					 * If we have a different TCB next, and there is data
5198 					 * present. If we have already taken some (pdapi), OR we can
5199 					 * ref the tcb and no delivery as started on this stream, we
5200 					 * take it. Note we allow a notification on a different
5201 					 * assoc to be delivered..
5202 					 */
5203 					control = ctl;
5204 					goto found_one;
5205 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5206 					    (ctl->length) &&
5207 					    ((ctl->some_taken) ||
5208 					    ((ctl->do_not_ref_stcb == 0) &&
5209 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5210 					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5211 				    ) {
5212 					/*-
5213 					 * If we have the same tcb, and there is data present, and we
5214 					 * have the strm interleave feature present. Then if we have
5215 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5216 					 * not started a delivery for this stream, we can take it.
5217 					 * Note we do NOT allow a notificaiton on the same assoc to
5218 					 * be delivered.
5219 					 */
5220 					control = ctl;
5221 					goto found_one;
5222 				}
5223 				ctl = TAILQ_NEXT(ctl, next);
5224 			}
5225 		}
5226 		/*
5227 		 * if we reach here, not suitable replacement is available
5228 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5229 		 * into the our held count, and its time to sleep again.
5230 		 */
5231 		held_length = so->so_rcv.sb_cc;
5232 		control->held_length = so->so_rcv.sb_cc;
5233 		goto restart;
5234 	}
5235 	/* Clear the held length since there is something to read */
5236 	control->held_length = 0;
5237 	if (hold_rlock) {
5238 		SCTP_INP_READ_UNLOCK(inp);
5239 		hold_rlock = 0;
5240 	}
5241 found_one:
5242 	/*
5243 	 * If we reach here, control has a some data for us to read off.
5244 	 * Note that stcb COULD be NULL.
5245 	 */
5246 	control->some_taken = 1;
5247 	if (hold_sblock) {
5248 		SOCKBUF_UNLOCK(&so->so_rcv);
5249 		hold_sblock = 0;
5250 	}
5251 	stcb = control->stcb;
5252 	if (stcb) {
5253 		if ((control->do_not_ref_stcb == 0) &&
5254 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5255 			if (freecnt_applied == 0)
5256 				stcb = NULL;
5257 		} else if (control->do_not_ref_stcb == 0) {
5258 			/* you can't free it on me please */
5259 			/*
5260 			 * The lock on the socket buffer protects us so the
5261 			 * free code will stop. But since we used the
5262 			 * socketbuf lock and the sender uses the tcb_lock
5263 			 * to increment, we need to use the atomic add to
5264 			 * the refcnt
5265 			 */
5266 			if (freecnt_applied)
5267 				panic("refcnt already incremented");
5268 			atomic_add_int(&stcb->asoc.refcnt, 1);
5269 			freecnt_applied = 1;
5270 			/*
5271 			 * Setup to remember how much we have not yet told
5272 			 * the peer our rwnd has opened up. Note we grab the
5273 			 * value from the tcb from last time. Note too that
5274 			 * sack sending clears this when a sack is sent,
5275 			 * which is fine. Once we hit the rwnd_req, we then
5276 			 * will go to the sctp_user_rcvd() that will not
5277 			 * lock until it KNOWs it MUST send a WUP-SACK.
5278 			 */
5279 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5280 			stcb->freed_by_sorcv_sincelast = 0;
5281 		}
5282 	}
5283 	if (stcb &&
5284 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5285 	    control->do_not_ref_stcb == 0) {
5286 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5287 	}
5288 	/* First lets get off the sinfo and sockaddr info */
5289 	if ((sinfo) && filling_sinfo) {
5290 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5291 		nxt = TAILQ_NEXT(control, next);
5292 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5293 			struct sctp_extrcvinfo *s_extra;
5294 
5295 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5296 			if ((nxt) &&
5297 			    (nxt->length)) {
5298 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5299 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5300 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5301 				}
5302 				if (nxt->spec_flags & M_NOTIFICATION) {
5303 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5304 				}
5305 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5306 				s_extra->sreinfo_next_length = nxt->length;
5307 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5308 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5309 				if (nxt->tail_mbuf != NULL) {
5310 					if (nxt->end_added) {
5311 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5312 					}
5313 				}
5314 			} else {
5315 				/*
5316 				 * we explicitly 0 this, since the memcpy
5317 				 * got some other things beyond the older
5318 				 * sinfo_ that is on the control's structure
5319 				 * :-D
5320 				 */
5321 				nxt = NULL;
5322 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5323 				s_extra->sreinfo_next_aid = 0;
5324 				s_extra->sreinfo_next_length = 0;
5325 				s_extra->sreinfo_next_ppid = 0;
5326 				s_extra->sreinfo_next_stream = 0;
5327 			}
5328 		}
5329 		/*
5330 		 * update off the real current cum-ack, if we have an stcb.
5331 		 */
5332 		if ((control->do_not_ref_stcb == 0) && stcb)
5333 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5334 		/*
5335 		 * mask off the high bits, we keep the actual chunk bits in
5336 		 * there.
5337 		 */
5338 		sinfo->sinfo_flags &= 0x00ff;
5339 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5340 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5341 		}
5342 	}
5343 #ifdef SCTP_ASOCLOG_OF_TSNS
5344 	{
5345 		int index, newindex;
5346 		struct sctp_pcbtsn_rlog *entry;
5347 
5348 		do {
5349 			index = inp->readlog_index;
5350 			newindex = index + 1;
5351 			if (newindex >= SCTP_READ_LOG_SIZE) {
5352 				newindex = 0;
5353 			}
5354 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5355 		entry = &inp->readlog[index];
5356 		entry->vtag = control->sinfo_assoc_id;
5357 		entry->strm = control->sinfo_stream;
5358 		entry->seq = control->sinfo_ssn;
5359 		entry->sz = control->length;
5360 		entry->flgs = control->sinfo_flags;
5361 	}
5362 #endif
5363 	if (fromlen && from) {
5364 		struct sockaddr *to;
5365 
5366 #ifdef INET
5367 		cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin.sin_len);
5368 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5369 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5370 #else
5371 		/* No AF_INET use AF_INET6 */
5372 		cp_len = min(fromlen, control->whoFrom->ro._l_addr.sin6.sin6_len);
5373 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5374 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5375 #endif
5376 
5377 		to = from;
5378 #if defined(INET) && defined(INET6)
5379 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
5380 		    (to->sa_family == AF_INET) &&
5381 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5382 			struct sockaddr_in *sin;
5383 			struct sockaddr_in6 sin6;
5384 
5385 			sin = (struct sockaddr_in *)to;
5386 			bzero(&sin6, sizeof(sin6));
5387 			sin6.sin6_family = AF_INET6;
5388 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5389 			sin6.sin6_addr.s6_addr16[2] = 0xffff;
5390 			bcopy(&sin->sin_addr,
5391 			    &sin6.sin6_addr.s6_addr16[3],
5392 			    sizeof(sin6.sin6_addr.s6_addr16[3]));
5393 			sin6.sin6_port = sin->sin_port;
5394 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5395 		}
5396 #endif
5397 #if defined(INET6)
5398 		{
5399 			struct sockaddr_in6 lsa6, *to6;
5400 
5401 			to6 = (struct sockaddr_in6 *)to;
5402 			sctp_recover_scope_mac(to6, (&lsa6));
5403 		}
5404 #endif
5405 	}
5406 	/* now copy out what data we can */
5407 	if (mp == NULL) {
5408 		/* copy out each mbuf in the chain up to length */
5409 get_more_data:
5410 		m = control->data;
5411 		while (m) {
5412 			/* Move out all we can */
5413 			cp_len = (int)uio->uio_resid;
5414 			my_len = (int)SCTP_BUF_LEN(m);
5415 			if (cp_len > my_len) {
5416 				/* not enough in this buf */
5417 				cp_len = my_len;
5418 			}
5419 			if (hold_rlock) {
5420 				SCTP_INP_READ_UNLOCK(inp);
5421 				hold_rlock = 0;
5422 			}
5423 			if (cp_len > 0)
5424 				error = uiomove(mtod(m, char *), cp_len, uio);
5425 			/* re-read */
5426 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5427 				goto release;
5428 			}
5429 			if ((control->do_not_ref_stcb == 0) && stcb &&
5430 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5431 				no_rcv_needed = 1;
5432 			}
5433 			if (error) {
5434 				/* error we are out of here */
5435 				goto release;
5436 			}
5437 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5438 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5439 			    ((control->end_added == 0) ||
5440 			    (control->end_added &&
5441 			    (TAILQ_NEXT(control, next) == NULL)))
5442 			    ) {
5443 				SCTP_INP_READ_LOCK(inp);
5444 				hold_rlock = 1;
5445 			}
5446 			if (cp_len == SCTP_BUF_LEN(m)) {
5447 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5448 				    (control->end_added)) {
5449 					out_flags |= MSG_EOR;
5450 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5451 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5452 				}
5453 				if (control->spec_flags & M_NOTIFICATION) {
5454 					out_flags |= MSG_NOTIFICATION;
5455 				}
5456 				/* we ate up the mbuf */
5457 				if (in_flags & MSG_PEEK) {
5458 					/* just looking */
5459 					m = SCTP_BUF_NEXT(m);
5460 					copied_so_far += cp_len;
5461 				} else {
5462 					/* dispose of the mbuf */
5463 					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5464 						sctp_sblog(&so->so_rcv,
5465 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5466 					}
5467 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5468 					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5469 						sctp_sblog(&so->so_rcv,
5470 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5471 					}
5472 					embuf = m;
5473 					copied_so_far += cp_len;
5474 					freed_so_far += cp_len;
5475 					freed_so_far += MSIZE;
5476 					atomic_subtract_int(&control->length, cp_len);
5477 					control->data = sctp_m_free(m);
5478 					m = control->data;
5479 					/*
5480 					 * been through it all, must hold sb
5481 					 * lock ok to null tail
5482 					 */
5483 					if (control->data == NULL) {
5484 #ifdef INVARIANTS
5485 						if ((control->end_added == 0) ||
5486 						    (TAILQ_NEXT(control, next) == NULL)) {
5487 							/*
5488 							 * If the end is not
5489 							 * added, OR the
5490 							 * next is NOT null
5491 							 * we MUST have the
5492 							 * lock.
5493 							 */
5494 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5495 								panic("Hmm we don't own the lock?");
5496 							}
5497 						}
5498 #endif
5499 						control->tail_mbuf = NULL;
5500 #ifdef INVARIANTS
5501 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5502 							panic("end_added, nothing left and no MSG_EOR");
5503 						}
5504 #endif
5505 					}
5506 				}
5507 			} else {
5508 				/* Do we need to trim the mbuf? */
5509 				if (control->spec_flags & M_NOTIFICATION) {
5510 					out_flags |= MSG_NOTIFICATION;
5511 				}
5512 				if ((in_flags & MSG_PEEK) == 0) {
5513 					SCTP_BUF_RESV_UF(m, cp_len);
5514 					SCTP_BUF_LEN(m) -= cp_len;
5515 					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5516 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5517 					}
5518 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5519 					if ((control->do_not_ref_stcb == 0) &&
5520 					    stcb) {
5521 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5522 					}
5523 					copied_so_far += cp_len;
5524 					embuf = m;
5525 					freed_so_far += cp_len;
5526 					freed_so_far += MSIZE;
5527 					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5528 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5529 						    SCTP_LOG_SBRESULT, 0);
5530 					}
5531 					atomic_subtract_int(&control->length, cp_len);
5532 				} else {
5533 					copied_so_far += cp_len;
5534 				}
5535 			}
5536 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5537 				break;
5538 			}
5539 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5540 			    (control->do_not_ref_stcb == 0) &&
5541 			    (freed_so_far >= rwnd_req)) {
5542 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5543 			}
5544 		}		/* end while(m) */
5545 		/*
5546 		 * At this point we have looked at it all and we either have
5547 		 * a MSG_EOR/or read all the user wants... <OR>
5548 		 * control->length == 0.
5549 		 */
5550 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5551 			/* we are done with this control */
5552 			if (control->length == 0) {
5553 				if (control->data) {
5554 #ifdef INVARIANTS
5555 					panic("control->data not null at read eor?");
5556 #else
5557 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5558 					sctp_m_freem(control->data);
5559 					control->data = NULL;
5560 #endif
5561 				}
5562 		done_with_control:
5563 				if (TAILQ_NEXT(control, next) == NULL) {
5564 					/*
5565 					 * If we don't have a next we need a
5566 					 * lock, if there is a next interupt
5567 					 * is filling ahead of us and we
5568 					 * don't need a lock to remove this
5569 					 * guy (which is the head of the
5570 					 * queue).
5571 					 */
5572 					if (hold_rlock == 0) {
5573 						SCTP_INP_READ_LOCK(inp);
5574 						hold_rlock = 1;
5575 					}
5576 				}
5577 				TAILQ_REMOVE(&inp->read_queue, control, next);
5578 				/* Add back any hiddend data */
5579 				if (control->held_length) {
5580 					held_length = 0;
5581 					control->held_length = 0;
5582 					wakeup_read_socket = 1;
5583 				}
5584 				if (control->aux_data) {
5585 					sctp_m_free(control->aux_data);
5586 					control->aux_data = NULL;
5587 				}
5588 				no_rcv_needed = control->do_not_ref_stcb;
5589 				sctp_free_remote_addr(control->whoFrom);
5590 				control->data = NULL;
5591 				sctp_free_a_readq(stcb, control);
5592 				control = NULL;
5593 				if ((freed_so_far >= rwnd_req) &&
5594 				    (no_rcv_needed == 0))
5595 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5596 
5597 			} else {
5598 				/*
5599 				 * The user did not read all of this
5600 				 * message, turn off the returned MSG_EOR
5601 				 * since we are leaving more behind on the
5602 				 * control to read.
5603 				 */
5604 #ifdef INVARIANTS
5605 				if (control->end_added &&
5606 				    (control->data == NULL) &&
5607 				    (control->tail_mbuf == NULL)) {
5608 					panic("Gak, control->length is corrupt?");
5609 				}
5610 #endif
5611 				no_rcv_needed = control->do_not_ref_stcb;
5612 				out_flags &= ~MSG_EOR;
5613 			}
5614 		}
5615 		if (out_flags & MSG_EOR) {
5616 			goto release;
5617 		}
5618 		if ((uio->uio_resid == 0) ||
5619 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5620 		    ) {
5621 			goto release;
5622 		}
5623 		/*
5624 		 * If I hit here the receiver wants more and this message is
5625 		 * NOT done (pd-api). So two questions. Can we block? if not
5626 		 * we are done. Did the user NOT set MSG_WAITALL?
5627 		 */
5628 		if (block_allowed == 0) {
5629 			goto release;
5630 		}
5631 		/*
5632 		 * We need to wait for more data a few things: - We don't
5633 		 * sbunlock() so we don't get someone else reading. - We
5634 		 * must be sure to account for the case where what is added
5635 		 * is NOT to our control when we wakeup.
5636 		 */
5637 
5638 		/*
5639 		 * Do we need to tell the transport a rwnd update might be
5640 		 * needed before we go to sleep?
5641 		 */
5642 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5643 		    ((freed_so_far >= rwnd_req) &&
5644 		    (control->do_not_ref_stcb == 0) &&
5645 		    (no_rcv_needed == 0))) {
5646 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5647 		}
5648 wait_some_more:
5649 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5650 			goto release;
5651 		}
5652 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5653 			goto release;
5654 
5655 		if (hold_rlock == 1) {
5656 			SCTP_INP_READ_UNLOCK(inp);
5657 			hold_rlock = 0;
5658 		}
5659 		if (hold_sblock == 0) {
5660 			SOCKBUF_LOCK(&so->so_rcv);
5661 			hold_sblock = 1;
5662 		}
5663 		if ((copied_so_far) && (control->length == 0) &&
5664 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
5665 		    ) {
5666 			goto release;
5667 		}
5668 		if (so->so_rcv.sb_cc <= control->held_length) {
5669 			error = sbwait(&so->so_rcv);
5670 			if (error) {
5671 				goto release;
5672 			}
5673 			control->held_length = 0;
5674 		}
5675 		if (hold_sblock) {
5676 			SOCKBUF_UNLOCK(&so->so_rcv);
5677 			hold_sblock = 0;
5678 		}
5679 		if (control->length == 0) {
5680 			/* still nothing here */
5681 			if (control->end_added == 1) {
5682 				/* he aborted, or is done i.e.did a shutdown */
5683 				out_flags |= MSG_EOR;
5684 				if (control->pdapi_aborted) {
5685 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5686 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5687 
5688 					out_flags |= MSG_TRUNC;
5689 				} else {
5690 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5691 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5692 				}
5693 				goto done_with_control;
5694 			}
5695 			if (so->so_rcv.sb_cc > held_length) {
5696 				control->held_length = so->so_rcv.sb_cc;
5697 				held_length = 0;
5698 			}
5699 			goto wait_some_more;
5700 		} else if (control->data == NULL) {
5701 			/*
5702 			 * we must re-sync since data is probably being
5703 			 * added
5704 			 */
5705 			SCTP_INP_READ_LOCK(inp);
5706 			if ((control->length > 0) && (control->data == NULL)) {
5707 				/*
5708 				 * big trouble.. we have the lock and its
5709 				 * corrupt?
5710 				 */
5711 				panic("Impossible data==NULL length !=0");
5712 			}
5713 			SCTP_INP_READ_UNLOCK(inp);
5714 			/* We will fall around to get more data */
5715 		}
5716 		goto get_more_data;
5717 	} else {
5718 		/*-
5719 		 * Give caller back the mbuf chain,
5720 		 * store in uio_resid the length
5721 		 */
5722 		wakeup_read_socket = 0;
5723 		if ((control->end_added == 0) ||
5724 		    (TAILQ_NEXT(control, next) == NULL)) {
5725 			/* Need to get rlock */
5726 			if (hold_rlock == 0) {
5727 				SCTP_INP_READ_LOCK(inp);
5728 				hold_rlock = 1;
5729 			}
5730 		}
5731 		if (control->end_added) {
5732 			out_flags |= MSG_EOR;
5733 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5734 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5735 		}
5736 		if (control->spec_flags & M_NOTIFICATION) {
5737 			out_flags |= MSG_NOTIFICATION;
5738 		}
5739 		uio->uio_resid = control->length;
5740 		*mp = control->data;
5741 		m = control->data;
5742 		while (m) {
5743 			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5744 				sctp_sblog(&so->so_rcv,
5745 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5746 			}
5747 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5748 			freed_so_far += SCTP_BUF_LEN(m);
5749 			freed_so_far += MSIZE;
5750 			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
5751 				sctp_sblog(&so->so_rcv,
5752 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5753 			}
5754 			m = SCTP_BUF_NEXT(m);
5755 		}
5756 		control->data = control->tail_mbuf = NULL;
5757 		control->length = 0;
5758 		if (out_flags & MSG_EOR) {
5759 			/* Done with this control */
5760 			goto done_with_control;
5761 		}
5762 	}
5763 release:
5764 	if (hold_rlock == 1) {
5765 		SCTP_INP_READ_UNLOCK(inp);
5766 		hold_rlock = 0;
5767 	}
5768 	if (hold_sblock == 1) {
5769 		SOCKBUF_UNLOCK(&so->so_rcv);
5770 		hold_sblock = 0;
5771 	}
5772 	sbunlock(&so->so_rcv);
5773 	sockbuf_lock = 0;
5774 
5775 release_unlocked:
5776 	if (hold_sblock) {
5777 		SOCKBUF_UNLOCK(&so->so_rcv);
5778 		hold_sblock = 0;
5779 	}
5780 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5781 		if ((freed_so_far >= rwnd_req) &&
5782 		    (control && (control->do_not_ref_stcb == 0)) &&
5783 		    (no_rcv_needed == 0))
5784 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5785 	}
5786 	if (msg_flags)
5787 		*msg_flags |= out_flags;
5788 out:
5789 	if (((out_flags & MSG_EOR) == 0) &&
5790 	    ((in_flags & MSG_PEEK) == 0) &&
5791 	    (sinfo) &&
5792 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
5793 		struct sctp_extrcvinfo *s_extra;
5794 
5795 		s_extra = (struct sctp_extrcvinfo *)sinfo;
5796 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5797 	}
5798 	if (hold_rlock == 1) {
5799 		SCTP_INP_READ_UNLOCK(inp);
5800 		hold_rlock = 0;
5801 	}
5802 	if (hold_sblock) {
5803 		SOCKBUF_UNLOCK(&so->so_rcv);
5804 		hold_sblock = 0;
5805 	}
5806 	if (sockbuf_lock) {
5807 		sbunlock(&so->so_rcv);
5808 	}
5809 	if (freecnt_applied) {
5810 		/*
5811 		 * The lock on the socket buffer protects us so the free
5812 		 * code will stop. But since we used the socketbuf lock and
5813 		 * the sender uses the tcb_lock to increment, we need to use
5814 		 * the atomic add to the refcnt.
5815 		 */
5816 		if (stcb == NULL) {
5817 			panic("stcb for refcnt has gone NULL?");
5818 		}
5819 		atomic_add_int(&stcb->asoc.refcnt, -1);
5820 		freecnt_applied = 0;
5821 		/* Save the value back for next time */
5822 		stcb->freed_by_sorcv_sincelast = freed_so_far;
5823 	}
5824 	if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
5825 		if (stcb) {
5826 			sctp_misc_ints(SCTP_SORECV_DONE,
5827 			    freed_so_far,
5828 			    ((uio) ? (slen - uio->uio_resid) : slen),
5829 			    stcb->asoc.my_rwnd,
5830 			    so->so_rcv.sb_cc);
5831 		} else {
5832 			sctp_misc_ints(SCTP_SORECV_DONE,
5833 			    freed_so_far,
5834 			    ((uio) ? (slen - uio->uio_resid) : slen),
5835 			    0,
5836 			    so->so_rcv.sb_cc);
5837 		}
5838 	}
5839 	if (wakeup_read_socket) {
5840 		sctp_sorwakeup(inp, so);
5841 	}
5842 	return (error);
5843 }
5844 
5845 
5846 #ifdef SCTP_MBUF_LOGGING
5847 struct mbuf *
5848 sctp_m_free(struct mbuf *m)
5849 {
5850 	if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
5851 		if (SCTP_BUF_IS_EXTENDED(m)) {
5852 			sctp_log_mb(m, SCTP_MBUF_IFREE);
5853 		}
5854 	}
5855 	return (m_free(m));
5856 }
5857 
5858 void
5859 sctp_m_freem(struct mbuf *mb)
5860 {
5861 	while (mb != NULL)
5862 		mb = sctp_m_free(mb);
5863 }
5864 
5865 #endif
5866 
5867 int
5868 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
5869 {
5870 	/*
5871 	 * Given a local address. For all associations that holds the
5872 	 * address, request a peer-set-primary.
5873 	 */
5874 	struct sctp_ifa *ifa;
5875 	struct sctp_laddr *wi;
5876 
5877 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
5878 	if (ifa == NULL) {
5879 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
5880 		return (EADDRNOTAVAIL);
5881 	}
5882 	/*
5883 	 * Now that we have the ifa we must awaken the iterator with this
5884 	 * message.
5885 	 */
5886 	wi = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr);
5887 	if (wi == NULL) {
5888 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
5889 		return (ENOMEM);
5890 	}
5891 	/* Now incr the count and int wi structure */
5892 	SCTP_INCR_LADDR_COUNT();
5893 	bzero(wi, sizeof(*wi));
5894 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
5895 	wi->ifa = ifa;
5896 	wi->action = SCTP_SET_PRIM_ADDR;
5897 	atomic_add_int(&ifa->refcount, 1);
5898 
5899 	/* Now add it to the work queue */
5900 	SCTP_IPI_ITERATOR_WQ_LOCK();
5901 	/*
5902 	 * Should this really be a tailq? As it is we will process the
5903 	 * newest first :-0
5904 	 */
5905 	LIST_INSERT_HEAD(&sctppcbinfo.addr_wq, wi, sctp_nxt_addr);
5906 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
5907 	    (struct sctp_inpcb *)NULL,
5908 	    (struct sctp_tcb *)NULL,
5909 	    (struct sctp_nets *)NULL);
5910 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
5911 	return (0);
5912 }
5913 
5914 
5915 
5916 
5917 int
5918 sctp_soreceive(struct socket *so,
5919     struct sockaddr **psa,
5920     struct uio *uio,
5921     struct mbuf **mp0,
5922     struct mbuf **controlp,
5923     int *flagsp)
5924 {
5925 	int error, fromlen;
5926 	uint8_t sockbuf[256];
5927 	struct sockaddr *from;
5928 	struct sctp_extrcvinfo sinfo;
5929 	int filling_sinfo = 1;
5930 	struct sctp_inpcb *inp;
5931 
5932 	inp = (struct sctp_inpcb *)so->so_pcb;
5933 	/* pickup the assoc we are reading from */
5934 	if (inp == NULL) {
5935 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5936 		return (EINVAL);
5937 	}
5938 	if ((sctp_is_feature_off(inp,
5939 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
5940 	    (controlp == NULL)) {
5941 		/* user does not want the sndrcv ctl */
5942 		filling_sinfo = 0;
5943 	}
5944 	if (psa) {
5945 		from = (struct sockaddr *)sockbuf;
5946 		fromlen = sizeof(sockbuf);
5947 		from->sa_len = 0;
5948 	} else {
5949 		from = NULL;
5950 		fromlen = 0;
5951 	}
5952 
5953 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
5954 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
5955 	if ((controlp) && (filling_sinfo)) {
5956 		/* copy back the sinfo in a CMSG format */
5957 		if (filling_sinfo)
5958 			*controlp = sctp_build_ctl_nchunk(inp,
5959 			    (struct sctp_sndrcvinfo *)&sinfo);
5960 		else
5961 			*controlp = NULL;
5962 	}
5963 	if (psa) {
5964 		/* copy back the address info */
5965 		if (from && from->sa_len) {
5966 			*psa = sodupsockaddr(from, M_NOWAIT);
5967 		} else {
5968 			*psa = NULL;
5969 		}
5970 	}
5971 	return (error);
5972 }
5973 
5974 
5975 int
5976 sctp_l_soreceive(struct socket *so,
5977     struct sockaddr **name,
5978     struct uio *uio,
5979     char **controlp,
5980     int *controllen,
5981     int *flag)
5982 {
5983 	int error, fromlen;
5984 	uint8_t sockbuf[256];
5985 	struct sockaddr *from;
5986 	struct sctp_extrcvinfo sinfo;
5987 	int filling_sinfo = 1;
5988 	struct sctp_inpcb *inp;
5989 
5990 	inp = (struct sctp_inpcb *)so->so_pcb;
5991 	/* pickup the assoc we are reading from */
5992 	if (inp == NULL) {
5993 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5994 		return (EINVAL);
5995 	}
5996 	if ((sctp_is_feature_off(inp,
5997 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
5998 	    (controlp == NULL)) {
5999 		/* user does not want the sndrcv ctl */
6000 		filling_sinfo = 0;
6001 	}
6002 	if (name) {
6003 		from = (struct sockaddr *)sockbuf;
6004 		fromlen = sizeof(sockbuf);
6005 		from->sa_len = 0;
6006 	} else {
6007 		from = NULL;
6008 		fromlen = 0;
6009 	}
6010 
6011 	error = sctp_sorecvmsg(so, uio,
6012 	    (struct mbuf **)NULL,
6013 	    from, fromlen, flag,
6014 	    (struct sctp_sndrcvinfo *)&sinfo,
6015 	    filling_sinfo);
6016 	if ((controlp) && (filling_sinfo)) {
6017 		/*
6018 		 * copy back the sinfo in a CMSG format note that the caller
6019 		 * has reponsibility for freeing the memory.
6020 		 */
6021 		if (filling_sinfo)
6022 			*controlp = sctp_build_ctl_cchunk(inp,
6023 			    controllen,
6024 			    (struct sctp_sndrcvinfo *)&sinfo);
6025 	}
6026 	if (name) {
6027 		/* copy back the address info */
6028 		if (from && from->sa_len) {
6029 			*name = sodupsockaddr(from, M_WAIT);
6030 		} else {
6031 			*name = NULL;
6032 		}
6033 	}
6034 	return (error);
6035 }
6036 
6037 
6038 
6039 
6040 
6041 
6042 
6043 int
6044 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6045     int totaddr, int *error)
6046 {
6047 	int added = 0;
6048 	int i;
6049 	struct sctp_inpcb *inp;
6050 	struct sockaddr *sa;
6051 	size_t incr = 0;
6052 
6053 	sa = addr;
6054 	inp = stcb->sctp_ep;
6055 	*error = 0;
6056 	for (i = 0; i < totaddr; i++) {
6057 		if (sa->sa_family == AF_INET) {
6058 			incr = sizeof(struct sockaddr_in);
6059 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6060 				/* assoc gone no un-lock */
6061 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6062 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
6063 				*error = ENOBUFS;
6064 				goto out_now;
6065 			}
6066 			added++;
6067 		} else if (sa->sa_family == AF_INET6) {
6068 			incr = sizeof(struct sockaddr_in6);
6069 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6070 				/* assoc gone no un-lock */
6071 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6072 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
6073 				*error = ENOBUFS;
6074 				goto out_now;
6075 			}
6076 			added++;
6077 		}
6078 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6079 	}
6080 out_now:
6081 	return (added);
6082 }
6083 
6084 struct sctp_tcb *
6085 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6086     int *totaddr, int *num_v4, int *num_v6, int *error,
6087     int limit, int *bad_addr)
6088 {
6089 	struct sockaddr *sa;
6090 	struct sctp_tcb *stcb = NULL;
6091 	size_t incr, at, i;
6092 
6093 	at = incr = 0;
6094 	sa = addr;
6095 	*error = *num_v6 = *num_v4 = 0;
6096 	/* account and validate addresses */
6097 	for (i = 0; i < (size_t)*totaddr; i++) {
6098 		if (sa->sa_family == AF_INET) {
6099 			(*num_v4) += 1;
6100 			incr = sizeof(struct sockaddr_in);
6101 			if (sa->sa_len != incr) {
6102 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6103 				*error = EINVAL;
6104 				*bad_addr = 1;
6105 				return (NULL);
6106 			}
6107 		} else if (sa->sa_family == AF_INET6) {
6108 			struct sockaddr_in6 *sin6;
6109 
6110 			sin6 = (struct sockaddr_in6 *)sa;
6111 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6112 				/* Must be non-mapped for connectx */
6113 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6114 				*error = EINVAL;
6115 				*bad_addr = 1;
6116 				return (NULL);
6117 			}
6118 			(*num_v6) += 1;
6119 			incr = sizeof(struct sockaddr_in6);
6120 			if (sa->sa_len != incr) {
6121 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6122 				*error = EINVAL;
6123 				*bad_addr = 1;
6124 				return (NULL);
6125 			}
6126 		} else {
6127 			*totaddr = i;
6128 			/* we are done */
6129 			break;
6130 		}
6131 		SCTP_INP_INCR_REF(inp);
6132 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6133 		if (stcb != NULL) {
6134 			/* Already have or am bring up an association */
6135 			return (stcb);
6136 		} else {
6137 			SCTP_INP_DECR_REF(inp);
6138 		}
6139 		if ((at + incr) > (size_t)limit) {
6140 			*totaddr = i;
6141 			break;
6142 		}
6143 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6144 	}
6145 	return ((struct sctp_tcb *)NULL);
6146 }
6147 
6148 /*
6149  * sctp_bindx(ADD) for one address.
6150  * assumes all arguments are valid/checked by caller.
6151  */
6152 void
6153 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6154     struct sockaddr *sa, sctp_assoc_t assoc_id,
6155     uint32_t vrf_id, int *error, void *p)
6156 {
6157 	struct sockaddr *addr_touse;
6158 	struct sockaddr_in sin;
6159 
6160 	/* see if we're bound all already! */
6161 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6162 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6163 		*error = EINVAL;
6164 		return;
6165 	}
6166 	addr_touse = sa;
6167 #if defined(INET6)
6168 	if (sa->sa_family == AF_INET6) {
6169 		struct sockaddr_in6 *sin6;
6170 
6171 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6172 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6173 			*error = EINVAL;
6174 			return;
6175 		}
6176 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6177 			/* can only bind v6 on PF_INET6 sockets */
6178 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6179 			*error = EINVAL;
6180 			return;
6181 		}
6182 		sin6 = (struct sockaddr_in6 *)addr_touse;
6183 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6184 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6185 			    SCTP_IPV6_V6ONLY(inp)) {
6186 				/* can't bind v4-mapped on PF_INET sockets */
6187 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6188 				*error = EINVAL;
6189 				return;
6190 			}
6191 			in6_sin6_2_sin(&sin, sin6);
6192 			addr_touse = (struct sockaddr *)&sin;
6193 		}
6194 	}
6195 #endif
6196 	if (sa->sa_family == AF_INET) {
6197 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6198 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6199 			*error = EINVAL;
6200 			return;
6201 		}
6202 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6203 		    SCTP_IPV6_V6ONLY(inp)) {
6204 			/* can't bind v4 on PF_INET sockets */
6205 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6206 			*error = EINVAL;
6207 			return;
6208 		}
6209 	}
6210 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6211 		if (p == NULL) {
6212 			/* Can't get proc for Net/Open BSD */
6213 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6214 			*error = EINVAL;
6215 			return;
6216 		}
6217 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6218 		return;
6219 	}
6220 	/*
6221 	 * No locks required here since bind and mgmt_ep_sa all do their own
6222 	 * locking. If we do something for the FIX: below we may need to
6223 	 * lock in that case.
6224 	 */
6225 	if (assoc_id == 0) {
6226 		/* add the address */
6227 		struct sctp_inpcb *lep;
6228 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6229 
6230 		/* validate the incoming port */
6231 		if ((lsin->sin_port != 0) &&
6232 		    (lsin->sin_port != inp->sctp_lport)) {
6233 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6234 			*error = EINVAL;
6235 			return;
6236 		} else {
6237 			/* user specified 0 port, set it to existing port */
6238 			lsin->sin_port = inp->sctp_lport;
6239 		}
6240 
6241 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6242 		if (lep != NULL) {
6243 			/*
6244 			 * We must decrement the refcount since we have the
6245 			 * ep already and are binding. No remove going on
6246 			 * here.
6247 			 */
6248 			SCTP_INP_DECR_REF(inp);
6249 		}
6250 		if (lep == inp) {
6251 			/* already bound to it.. ok */
6252 			return;
6253 		} else if (lep == NULL) {
6254 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6255 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6256 			    SCTP_ADD_IP_ADDRESS,
6257 			    vrf_id, NULL);
6258 		} else {
6259 			*error = EADDRINUSE;
6260 		}
6261 		if (*error)
6262 			return;
6263 	} else {
6264 		/*
6265 		 * FIX: decide whether we allow assoc based bindx
6266 		 */
6267 	}
6268 }
6269 
6270 /*
6271  * sctp_bindx(DELETE) for one address.
6272  * assumes all arguments are valid/checked by caller.
6273  */
6274 void
6275 sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
6276     struct sockaddr *sa, sctp_assoc_t assoc_id,
6277     uint32_t vrf_id, int *error)
6278 {
6279 	struct sockaddr *addr_touse;
6280 	struct sockaddr_in sin;
6281 
6282 	/* see if we're bound all already! */
6283 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6284 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6285 		*error = EINVAL;
6286 		return;
6287 	}
6288 	addr_touse = sa;
6289 #if defined(INET6)
6290 	if (sa->sa_family == AF_INET6) {
6291 		struct sockaddr_in6 *sin6;
6292 
6293 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6294 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6295 			*error = EINVAL;
6296 			return;
6297 		}
6298 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6299 			/* can only bind v6 on PF_INET6 sockets */
6300 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6301 			*error = EINVAL;
6302 			return;
6303 		}
6304 		sin6 = (struct sockaddr_in6 *)addr_touse;
6305 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6306 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6307 			    SCTP_IPV6_V6ONLY(inp)) {
6308 				/* can't bind mapped-v4 on PF_INET sockets */
6309 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6310 				*error = EINVAL;
6311 				return;
6312 			}
6313 			in6_sin6_2_sin(&sin, sin6);
6314 			addr_touse = (struct sockaddr *)&sin;
6315 		}
6316 	}
6317 #endif
6318 	if (sa->sa_family == AF_INET) {
6319 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6320 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6321 			*error = EINVAL;
6322 			return;
6323 		}
6324 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6325 		    SCTP_IPV6_V6ONLY(inp)) {
6326 			/* can't bind v4 on PF_INET sockets */
6327 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6328 			*error = EINVAL;
6329 			return;
6330 		}
6331 	}
6332 	/*
6333 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6334 	 * below is ever changed we may need to lock before calling
6335 	 * association level binding.
6336 	 */
6337 	if (assoc_id == 0) {
6338 		/* delete the address */
6339 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6340 		    SCTP_DEL_IP_ADDRESS,
6341 		    vrf_id, NULL);
6342 	} else {
6343 		/*
6344 		 * FIX: decide whether we allow assoc based bindx
6345 		 */
6346 	}
6347 }
6348 
6349 /*
6350  * returns the valid local address count for an assoc, taking into account
6351  * all scoping rules
6352  */
/*
 * Count the valid local addresses for an association, applying the
 * association's scoping rules (loopback, IPv4-private, link-local and
 * site-local scopes) and the endpoint's v4/v6 binding flags.  Walks
 * either all ifns of the VRF (bound-all) or the endpoint's explicit
 * address list (subset bound), holding the global address lock.
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/* Which families are usable follows the endpoint's binding flags. */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			/* v6 socket that is not v6-only may also use v4 */
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_LOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_UNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				/* loopback addresses excluded by scope */
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				/* skip addresses restricted for this assoc */
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;

				if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
				    (ipv4_addr_legal)) {
					struct sockaddr_in *sin;

					sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
					if (sin->sin_addr.s_addr == 0) {
						/* skip unspecified addrs */
						continue;
					}
					if ((ipv4_local_scope == 0) &&
					    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
						/* private addrs excluded by scope */
						continue;
					}
					/* count this one */
					count++;
				} else if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
				    (ipv6_addr_legal)) {
					struct sockaddr_in6 *sin6;

					sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
					if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
						/* skip unspecified addrs */
						continue;
					}
					if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
						if (local_scope == 0)
							continue;
						if (sin6->sin6_scope_id == 0) {
							/* skip if scope cannot be recovered */
							if (sa6_recoverscope(sin6) != 0)
								/*
								 * bad link-local address
								 */
								continue;
						}
					}
					if ((site_scope == 0) &&
					    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
						/* site-local excluded by scope */
						continue;
					}
					/* count this one */
					count++;
				}
			}
		}
	} else {
		/*
		 * subset bound case: count the endpoint's explicit address
		 * list, minus any addresses restricted for this assoc.
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_UNLOCK();
	return (count);
}
6461 
6462 #if defined(SCTP_LOCAL_TRACE_BUF)
6463 
/* One entry of the in-kernel SCTP trace ring buffer. */
struct sctp_dump_log {
	u_int64_t timestamp;	/* cycle counter at log time */
	const char *descr;	/* static description string (not copied) */
	uint32_t subsys;	/* subsystem identifier */
	uint32_t params[SCTP_TRACE_PARAMS];	/* caller-supplied values */
};
/* Next slot to claim in sctp_log[]; advanced lock-free via CAS. */
int sctp_log_index = 0;
/* The trace ring buffer itself. */
struct sctp_dump_log sctp_log[SCTP_MAX_LOGGING_SIZE];
6472 
/*
 * Append one entry to the SCTP trace ring buffer.
 *
 * A slot is claimed lock-free: the CAS loop advances sctp_log_index
 * and retries if another CPU raced us.  When the index reaches
 * SCTP_MAX_LOGGING_SIZE it wraps: the winner writes slot 0 and the
 * next index becomes 1 (so sctp_log_index ranges over 1..MAX, while
 * the slot actually written ranges over 0..MAX-1).
 *
 * NOTE(review): only the index claim is atomic; the entry fields are
 * filled without further synchronization, so a reader racing a writer
 * may observe a partially updated entry.  Presumably acceptable for a
 * debug-only trace facility — confirm before relying on it.
 */
void
sctp_log_trace(uint32_t subsys, const char *str, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	int saveindex, newindex;

	do {
		saveindex = sctp_log_index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&sctp_log_index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	sctp_log[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	sctp_log[saveindex].subsys = subsys;
	sctp_log[saveindex].descr = str;
	sctp_log[saveindex].params[0] = a;
	sctp_log[saveindex].params[1] = b;
	sctp_log[saveindex].params[2] = c;
	sctp_log[saveindex].params[3] = d;
	sctp_log[saveindex].params[4] = e;
	sctp_log[saveindex].params[5] = f;
}
6499 
6500 #endif
6501