xref: /freebsd/sys/netinet/sctputil.c (revision d876124d6ae9d56da5b4ff4c6015efd1d0c9222a)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_crc32.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_cc_functions.h>
53 
54 #define NUMBER_OF_MTU_SIZES 18
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 void
62 sctp_sblog(struct sockbuf *sb,
63     struct sctp_tcb *stcb, int from, int incr)
64 {
65 	struct sctp_cwnd_log sctp_clog;
66 
67 	sctp_clog.x.sb.stcb = stcb;
68 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
69 	if (stcb)
70 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
71 	else
72 		sctp_clog.x.sb.stcb_sbcc = 0;
73 	sctp_clog.x.sb.incr = incr;
74 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
75 	    SCTP_LOG_EVENT_SB,
76 	    from,
77 	    sctp_clog.x.misc.log1,
78 	    sctp_clog.x.misc.log2,
79 	    sctp_clog.x.misc.log3,
80 	    sctp_clog.x.misc.log4);
81 }
82 
83 void
84 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
85 {
86 	struct sctp_cwnd_log sctp_clog;
87 
88 	sctp_clog.x.close.inp = (void *)inp;
89 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
90 	if (stcb) {
91 		sctp_clog.x.close.stcb = (void *)stcb;
92 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
93 	} else {
94 		sctp_clog.x.close.stcb = 0;
95 		sctp_clog.x.close.state = 0;
96 	}
97 	sctp_clog.x.close.loc = loc;
98 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
99 	    SCTP_LOG_EVENT_CLOSE,
100 	    0,
101 	    sctp_clog.x.misc.log1,
102 	    sctp_clog.x.misc.log2,
103 	    sctp_clog.x.misc.log3,
104 	    sctp_clog.x.misc.log4);
105 }
106 
107 
108 void
109 rto_logging(struct sctp_nets *net, int from)
110 {
111 	struct sctp_cwnd_log sctp_clog;
112 
113 	sctp_clog.x.rto.net = (void *)net;
114 	sctp_clog.x.rto.rtt = net->prev_rtt;
115 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
116 	    SCTP_LOG_EVENT_RTT,
117 	    from,
118 	    sctp_clog.x.misc.log1,
119 	    sctp_clog.x.misc.log2,
120 	    sctp_clog.x.misc.log3,
121 	    sctp_clog.x.misc.log4);
122 
123 }
124 
125 void
126 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
127 {
128 	struct sctp_cwnd_log sctp_clog;
129 
130 	sctp_clog.x.strlog.stcb = stcb;
131 	sctp_clog.x.strlog.n_tsn = tsn;
132 	sctp_clog.x.strlog.n_sseq = sseq;
133 	sctp_clog.x.strlog.e_tsn = 0;
134 	sctp_clog.x.strlog.e_sseq = 0;
135 	sctp_clog.x.strlog.strm = stream;
136 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
137 	    SCTP_LOG_EVENT_STRM,
138 	    from,
139 	    sctp_clog.x.misc.log1,
140 	    sctp_clog.x.misc.log2,
141 	    sctp_clog.x.misc.log3,
142 	    sctp_clog.x.misc.log4);
143 
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 
166 void
167 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
168 {
169 	struct sctp_cwnd_log sctp_clog;
170 
171 	sctp_clog.x.sack.cumack = cumack;
172 	sctp_clog.x.sack.oldcumack = old_cumack;
173 	sctp_clog.x.sack.tsn = tsn;
174 	sctp_clog.x.sack.numGaps = gaps;
175 	sctp_clog.x.sack.numDups = dups;
176 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
177 	    SCTP_LOG_EVENT_SACK,
178 	    from,
179 	    sctp_clog.x.misc.log1,
180 	    sctp_clog.x.misc.log2,
181 	    sctp_clog.x.misc.log3,
182 	    sctp_clog.x.misc.log4);
183 }
184 
185 void
186 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
187 {
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
204     int from)
205 {
206 	struct sctp_cwnd_log sctp_clog;
207 
208 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
209 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
210 	sctp_clog.x.fr.tsn = tsn;
211 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
212 	    SCTP_LOG_EVENT_FR,
213 	    from,
214 	    sctp_clog.x.misc.log1,
215 	    sctp_clog.x.misc.log2,
216 	    sctp_clog.x.misc.log3,
217 	    sctp_clog.x.misc.log4);
218 
219 }
220 
221 
222 void
223 sctp_log_mb(struct mbuf *m, int from)
224 {
225 	struct sctp_cwnd_log sctp_clog;
226 
227 	sctp_clog.x.mb.mp = m;
228 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
229 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
230 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
231 	if (SCTP_BUF_IS_EXTENDED(m)) {
232 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
233 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
234 	} else {
235 		sctp_clog.x.mb.ext = 0;
236 		sctp_clog.x.mb.refcnt = 0;
237 	}
238 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
239 	    SCTP_LOG_EVENT_MBUF,
240 	    from,
241 	    sctp_clog.x.misc.log1,
242 	    sctp_clog.x.misc.log2,
243 	    sctp_clog.x.misc.log3,
244 	    sctp_clog.x.misc.log4);
245 }
246 
247 
248 void
249 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
250     int from)
251 {
252 	struct sctp_cwnd_log sctp_clog;
253 
254 	if (control == NULL) {
255 		SCTP_PRINTF("Gak log of NULL?\n");
256 		return;
257 	}
258 	sctp_clog.x.strlog.stcb = control->stcb;
259 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
260 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
261 	sctp_clog.x.strlog.strm = control->sinfo_stream;
262 	if (poschk != NULL) {
263 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
264 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
265 	} else {
266 		sctp_clog.x.strlog.e_tsn = 0;
267 		sctp_clog.x.strlog.e_sseq = 0;
268 	}
269 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
270 	    SCTP_LOG_EVENT_STRM,
271 	    from,
272 	    sctp_clog.x.misc.log1,
273 	    sctp_clog.x.misc.log2,
274 	    sctp_clog.x.misc.log3,
275 	    sctp_clog.x.misc.log4);
276 
277 }
278 
279 void
280 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
281 {
282 	struct sctp_cwnd_log sctp_clog;
283 
284 	sctp_clog.x.cwnd.net = net;
285 	if (stcb->asoc.send_queue_cnt > 255)
286 		sctp_clog.x.cwnd.cnt_in_send = 255;
287 	else
288 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
289 	if (stcb->asoc.stream_queue_cnt > 255)
290 		sctp_clog.x.cwnd.cnt_in_str = 255;
291 	else
292 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
293 
294 	if (net) {
295 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
296 		sctp_clog.x.cwnd.inflight = net->flight_size;
297 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
298 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
299 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
300 	}
301 	if (SCTP_CWNDLOG_PRESEND == from) {
302 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
303 	}
304 	sctp_clog.x.cwnd.cwnd_augment = augment;
305 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
306 	    SCTP_LOG_EVENT_CWND,
307 	    from,
308 	    sctp_clog.x.misc.log1,
309 	    sctp_clog.x.misc.log2,
310 	    sctp_clog.x.misc.log3,
311 	    sctp_clog.x.misc.log4);
312 
313 }
314 
315 void
316 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
317 {
318 	struct sctp_cwnd_log sctp_clog;
319 
320 	if (inp) {
321 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
322 
323 	} else {
324 		sctp_clog.x.lock.sock = (void *)NULL;
325 	}
326 	sctp_clog.x.lock.inp = (void *)inp;
327 	if (stcb) {
328 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
329 	} else {
330 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
331 	}
332 	if (inp) {
333 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
334 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
335 	} else {
336 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
337 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
338 	}
339 	sctp_clog.x.lock.info_lock = rw_wowned(&sctppcbinfo.ipi_ep_mtx);
340 	if (inp->sctp_socket) {
341 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
342 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
343 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
344 	} else {
345 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
346 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
347 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
348 	}
349 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
350 	    SCTP_LOG_LOCK_EVENT,
351 	    from,
352 	    sctp_clog.x.misc.log1,
353 	    sctp_clog.x.misc.log2,
354 	    sctp_clog.x.misc.log3,
355 	    sctp_clog.x.misc.log4);
356 
357 }
358 
359 void
360 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
361 {
362 	struct sctp_cwnd_log sctp_clog;
363 
364 	sctp_clog.x.cwnd.net = net;
365 	sctp_clog.x.cwnd.cwnd_new_value = error;
366 	sctp_clog.x.cwnd.inflight = net->flight_size;
367 	sctp_clog.x.cwnd.cwnd_augment = burst;
368 	if (stcb->asoc.send_queue_cnt > 255)
369 		sctp_clog.x.cwnd.cnt_in_send = 255;
370 	else
371 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
372 	if (stcb->asoc.stream_queue_cnt > 255)
373 		sctp_clog.x.cwnd.cnt_in_str = 255;
374 	else
375 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
376 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
377 	    SCTP_LOG_EVENT_MAXBURST,
378 	    from,
379 	    sctp_clog.x.misc.log1,
380 	    sctp_clog.x.misc.log2,
381 	    sctp_clog.x.misc.log3,
382 	    sctp_clog.x.misc.log4);
383 
384 }
385 
386 void
387 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
388 {
389 	struct sctp_cwnd_log sctp_clog;
390 
391 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
392 	sctp_clog.x.rwnd.send_size = snd_size;
393 	sctp_clog.x.rwnd.overhead = overhead;
394 	sctp_clog.x.rwnd.new_rwnd = 0;
395 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
396 	    SCTP_LOG_EVENT_RWND,
397 	    from,
398 	    sctp_clog.x.misc.log1,
399 	    sctp_clog.x.misc.log2,
400 	    sctp_clog.x.misc.log3,
401 	    sctp_clog.x.misc.log4);
402 }
403 
404 void
405 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
406 {
407 	struct sctp_cwnd_log sctp_clog;
408 
409 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
410 	sctp_clog.x.rwnd.send_size = flight_size;
411 	sctp_clog.x.rwnd.overhead = overhead;
412 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
413 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
414 	    SCTP_LOG_EVENT_RWND,
415 	    from,
416 	    sctp_clog.x.misc.log1,
417 	    sctp_clog.x.misc.log2,
418 	    sctp_clog.x.misc.log3,
419 	    sctp_clog.x.misc.log4);
420 }
421 
422 void
423 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
424 {
425 	struct sctp_cwnd_log sctp_clog;
426 
427 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
428 	sctp_clog.x.mbcnt.size_change = book;
429 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
430 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
431 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
432 	    SCTP_LOG_EVENT_MBCNT,
433 	    from,
434 	    sctp_clog.x.misc.log1,
435 	    sctp_clog.x.misc.log2,
436 	    sctp_clog.x.misc.log3,
437 	    sctp_clog.x.misc.log4);
438 
439 }
440 
441 void
442 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
443 {
444 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
445 	    SCTP_LOG_MISC_EVENT,
446 	    from,
447 	    a, b, c, d);
448 }
449 
450 void
451 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
452 {
453 	struct sctp_cwnd_log sctp_clog;
454 
455 	sctp_clog.x.wake.stcb = (void *)stcb;
456 	sctp_clog.x.wake.wake_cnt = wake_cnt;
457 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
458 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
459 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
460 
461 	if (stcb->asoc.stream_queue_cnt < 0xff)
462 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
463 	else
464 		sctp_clog.x.wake.stream_qcnt = 0xff;
465 
466 	if (stcb->asoc.chunks_on_out_queue < 0xff)
467 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
468 	else
469 		sctp_clog.x.wake.chunks_on_oque = 0xff;
470 
471 	sctp_clog.x.wake.sctpflags = 0;
472 	/* set in the defered mode stuff */
473 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
474 		sctp_clog.x.wake.sctpflags |= 1;
475 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
476 		sctp_clog.x.wake.sctpflags |= 2;
477 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
478 		sctp_clog.x.wake.sctpflags |= 4;
479 	/* what about the sb */
480 	if (stcb->sctp_socket) {
481 		struct socket *so = stcb->sctp_socket;
482 
483 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
484 	} else {
485 		sctp_clog.x.wake.sbflags = 0xff;
486 	}
487 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
488 	    SCTP_LOG_EVENT_WAKE,
489 	    from,
490 	    sctp_clog.x.misc.log1,
491 	    sctp_clog.x.misc.log2,
492 	    sctp_clog.x.misc.log3,
493 	    sctp_clog.x.misc.log4);
494 
495 }
496 
497 void
498 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
499 {
500 	struct sctp_cwnd_log sctp_clog;
501 
502 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
503 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
504 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
505 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
506 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
507 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
508 	sctp_clog.x.blk.sndlen = sendlen;
509 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
510 	    SCTP_LOG_EVENT_BLOCK,
511 	    from,
512 	    sctp_clog.x.misc.log1,
513 	    sctp_clog.x.misc.log2,
514 	    sctp_clog.x.misc.log3,
515 	    sctp_clog.x.misc.log4);
516 
517 }
518 
/*
 * Handler for the stat-log copy-out socket option.  Event logging was
 * moved to the kernel KTR facility (see the SCTP_CTR6 calls above), so
 * there is no in-kernel log buffer left to copy out; this is now a
 * successful no-op.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
525 
526 #ifdef SCTP_AUDITING_ENABLED
527 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
528 static int sctp_audit_indx = 0;
529 
530 static
531 void
532 sctp_print_audit_report(void)
533 {
534 	int i;
535 	int cnt;
536 
537 	cnt = 0;
538 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
539 		if ((sctp_audit_data[i][0] == 0xe0) &&
540 		    (sctp_audit_data[i][1] == 0x01)) {
541 			cnt = 0;
542 			SCTP_PRINTF("\n");
543 		} else if (sctp_audit_data[i][0] == 0xf0) {
544 			cnt = 0;
545 			SCTP_PRINTF("\n");
546 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
547 		    (sctp_audit_data[i][1] == 0x01)) {
548 			SCTP_PRINTF("\n");
549 			cnt = 0;
550 		}
551 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
552 		    (uint32_t) sctp_audit_data[i][1]);
553 		cnt++;
554 		if ((cnt % 14) == 0)
555 			SCTP_PRINTF("\n");
556 	}
557 	for (i = 0; i < sctp_audit_indx; i++) {
558 		if ((sctp_audit_data[i][0] == 0xe0) &&
559 		    (sctp_audit_data[i][1] == 0x01)) {
560 			cnt = 0;
561 			SCTP_PRINTF("\n");
562 		} else if (sctp_audit_data[i][0] == 0xf0) {
563 			cnt = 0;
564 			SCTP_PRINTF("\n");
565 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
566 		    (sctp_audit_data[i][1] == 0x01)) {
567 			SCTP_PRINTF("\n");
568 			cnt = 0;
569 		}
570 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
571 		    (uint32_t) sctp_audit_data[i][1]);
572 		cnt++;
573 		if ((cnt % 14) == 0)
574 			SCTP_PRINTF("\n");
575 	}
576 	SCTP_PRINTF("\n");
577 }
578 
579 void
580 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
581     struct sctp_nets *net)
582 {
583 	int resend_cnt, tot_out, rep, tot_book_cnt;
584 	struct sctp_nets *lnet;
585 	struct sctp_tmit_chunk *chk;
586 
587 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
588 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
589 	sctp_audit_indx++;
590 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
591 		sctp_audit_indx = 0;
592 	}
593 	if (inp == NULL) {
594 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
595 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
596 		sctp_audit_indx++;
597 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
598 			sctp_audit_indx = 0;
599 		}
600 		return;
601 	}
602 	if (stcb == NULL) {
603 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
604 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
605 		sctp_audit_indx++;
606 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
607 			sctp_audit_indx = 0;
608 		}
609 		return;
610 	}
611 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
612 	sctp_audit_data[sctp_audit_indx][1] =
613 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
614 	sctp_audit_indx++;
615 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
616 		sctp_audit_indx = 0;
617 	}
618 	rep = 0;
619 	tot_book_cnt = 0;
620 	resend_cnt = tot_out = 0;
621 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
622 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
623 			resend_cnt++;
624 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
625 			tot_out += chk->book_size;
626 			tot_book_cnt++;
627 		}
628 	}
629 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
630 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
631 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
632 		sctp_audit_indx++;
633 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
634 			sctp_audit_indx = 0;
635 		}
636 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
637 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
638 		rep = 1;
639 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
640 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
641 		sctp_audit_data[sctp_audit_indx][1] =
642 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
643 		sctp_audit_indx++;
644 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
645 			sctp_audit_indx = 0;
646 		}
647 	}
648 	if (tot_out != stcb->asoc.total_flight) {
649 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
650 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
651 		sctp_audit_indx++;
652 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
653 			sctp_audit_indx = 0;
654 		}
655 		rep = 1;
656 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
657 		    (int)stcb->asoc.total_flight);
658 		stcb->asoc.total_flight = tot_out;
659 	}
660 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
661 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
662 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
663 		sctp_audit_indx++;
664 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
665 			sctp_audit_indx = 0;
666 		}
667 		rep = 1;
668 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
669 
670 		stcb->asoc.total_flight_count = tot_book_cnt;
671 	}
672 	tot_out = 0;
673 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
674 		tot_out += lnet->flight_size;
675 	}
676 	if (tot_out != stcb->asoc.total_flight) {
677 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
678 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
679 		sctp_audit_indx++;
680 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
681 			sctp_audit_indx = 0;
682 		}
683 		rep = 1;
684 		SCTP_PRINTF("real flight:%d net total was %d\n",
685 		    stcb->asoc.total_flight, tot_out);
686 		/* now corrective action */
687 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
688 
689 			tot_out = 0;
690 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
691 				if ((chk->whoTo == lnet) &&
692 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
693 					tot_out += chk->book_size;
694 				}
695 			}
696 			if (lnet->flight_size != tot_out) {
697 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
698 				    (uint32_t) lnet, lnet->flight_size,
699 				    tot_out);
700 				lnet->flight_size = tot_out;
701 			}
702 		}
703 	}
704 	if (rep) {
705 		sctp_print_audit_report();
706 	}
707 }
708 
709 void
710 sctp_audit_log(uint8_t ev, uint8_t fd)
711 {
712 
713 	sctp_audit_data[sctp_audit_indx][0] = ev;
714 	sctp_audit_data[sctp_audit_indx][1] = fd;
715 	sctp_audit_indx++;
716 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
717 		sctp_audit_indx = 0;
718 	}
719 }
720 
721 #endif
722 
723 /*
724  * a list of sizes based on typical mtu's, used only if next hop size not
725  * returned.
726  */
/*
 * NOTE: must stay sorted ascending and hold exactly NUMBER_OF_MTU_SIZES
 * (18) entries — find_next_best_mtu() walks it by that constant.
 */
static int sctp_mtu_sizes[] = {
	68,			/* RFC 791 minimum IPv4 datagram */
	296,
	508,
	512,
	544,
	576,			/* classic dial-up / minimum reassembly */
	1006,
	1492,			/* PPPoE */
	1500,			/* Ethernet */
	1536,
	2002,
	2048,
	4352,			/* FDDI */
	4464,
	8166,
	17914,
	32000,
	65535			/* maximum IP datagram */
};
747 
748 void
749 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
750 {
751 	struct sctp_association *asoc;
752 	struct sctp_nets *net;
753 
754 	asoc = &stcb->asoc;
755 
756 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
757 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
758 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
759 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
760 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
761 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
762 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
763 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
764 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
765 	}
766 }
767 
768 int
769 find_next_best_mtu(int totsz)
770 {
771 	int i, perfer;
772 
773 	/*
774 	 * if we are in here we must find the next best fit based on the
775 	 * size of the dg that failed to be sent.
776 	 */
777 	perfer = 0;
778 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
779 		if (totsz < sctp_mtu_sizes[i]) {
780 			perfer = i - 1;
781 			if (perfer < 0)
782 				perfer = 0;
783 			break;
784 		}
785 	}
786 	return (sctp_mtu_sizes[perfer]);
787 }
788 
/*
 * Regenerate the PCB's pool of random bytes (random_store) by HMAC'ing
 * the endpoint's seed material with a monotonically increasing counter,
 * and rewind the consumption offset (store_at) to the start.  Called by
 * sctp_select_initial_TSN() whenever the pool wraps.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* bump the counter so the next refill hashes fresh input */
	m->random_counter++;
}
807 
/*
 * Hand out the next 32-bit random value from the endpoint's random
 * store, refilling the store when it wraps.  The slot is claimed with a
 * lock-free compare-and-swap on inp->store_at, so concurrent callers
 * each get a distinct offset without taking a lock.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	/*
	 * Debug override: when initial_sequence_debug is non-zero, hand
	 * out deterministic, monotonically increasing values instead.
	 */
	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* wrap before the tail so a full uint32_t always fits */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* claim [store_at, store_at+4) atomically; retry if we lost the race */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/*
	 * NOTE(review): reads the byte pool through a uint32_t pointer —
	 * presumably random_store is suitably aligned; confirm, since
	 * this is technically a strict-aliasing/alignment concern.
	 */
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
845 
846 uint32_t
847 sctp_select_a_tag(struct sctp_inpcb *inp, int save_in_twait)
848 {
849 	u_long x, not_done;
850 	struct timeval now;
851 
852 	(void)SCTP_GETTIME_TIMEVAL(&now);
853 	not_done = 1;
854 	while (not_done) {
855 		x = sctp_select_initial_TSN(&inp->sctp_ep);
856 		if (x == 0) {
857 			/* we never use 0 */
858 			continue;
859 		}
860 		if (sctp_is_vtag_good(inp, x, &now, save_in_twait)) {
861 			not_done = 0;
862 		}
863 	}
864 	return (x);
865 }
866 
867 int
868 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
869     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
870 {
871 	struct sctp_association *asoc;
872 
873 	/*
874 	 * Anything set to zero is taken care of by the allocation routine's
875 	 * bzero
876 	 */
877 
878 	/*
879 	 * Up front select what scoping to apply on addresses I tell my peer
880 	 * Not sure what to do with these right now, we will need to come up
881 	 * with a way to set them. We may need to pass them through from the
882 	 * caller in the sctp_aloc_assoc() function.
883 	 */
884 	int i;
885 
886 	asoc = &stcb->asoc;
887 	/* init all variables to a known value. */
888 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
889 	asoc->max_burst = m->sctp_ep.max_burst;
890 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
891 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
892 	asoc->sctp_cmt_on_off = (uint8_t) sctp_cmt_on_off;
893 	/* JRS 5/21/07 - Init CMT PF variables */
894 	asoc->sctp_cmt_pf = (uint8_t) sctp_cmt_pf;
895 	asoc->sctp_frag_point = m->sctp_frag_point;
896 #ifdef INET
897 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
898 #else
899 	asoc->default_tos = 0;
900 #endif
901 
902 #ifdef INET6
903 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
904 #else
905 	asoc->default_flowlabel = 0;
906 #endif
907 	asoc->sb_send_resv = 0;
908 	if (override_tag) {
909 		struct timeval now;
910 
911 		(void)SCTP_GETTIME_TIMEVAL(&now);
912 		if (sctp_is_in_timewait(override_tag)) {
913 			/*
914 			 * It must be in the time-wait hash, we put it there
915 			 * when we aloc one. If not the peer is playing
916 			 * games.
917 			 */
918 			asoc->my_vtag = override_tag;
919 		} else {
920 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
921 			panic("Huh is_in_timewait fails");
922 			return (ENOMEM);
923 		}
924 
925 	} else {
926 		asoc->my_vtag = sctp_select_a_tag(m, 1);
927 	}
928 	/* Get the nonce tags */
929 	asoc->my_vtag_nonce = sctp_select_a_tag(m, 0);
930 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, 0);
931 	asoc->vrf_id = vrf_id;
932 
933 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
934 		asoc->hb_is_disabled = 1;
935 	else
936 		asoc->hb_is_disabled = 0;
937 
938 #ifdef SCTP_ASOCLOG_OF_TSNS
939 	asoc->tsn_in_at = 0;
940 	asoc->tsn_out_at = 0;
941 	asoc->tsn_in_wrapped = 0;
942 	asoc->tsn_out_wrapped = 0;
943 	asoc->cumack_log_at = 0;
944 	asoc->cumack_log_atsnt = 0;
945 #endif
946 #ifdef SCTP_FS_SPEC_LOG
947 	asoc->fs_index = 0;
948 #endif
949 	asoc->refcnt = 0;
950 	asoc->assoc_up_sent = 0;
951 	asoc->assoc_id = asoc->my_vtag;
952 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
953 	    sctp_select_initial_TSN(&m->sctp_ep);
954 	/* we are optimisitic here */
955 	asoc->peer_supports_pktdrop = 1;
956 
957 	asoc->sent_queue_retran_cnt = 0;
958 
959 	/* for CMT */
960 	asoc->last_net_data_came_from = NULL;
961 
962 	/* This will need to be adjusted */
963 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
964 	asoc->last_acked_seq = asoc->init_seq_number - 1;
965 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
966 	asoc->asconf_seq_in = asoc->last_acked_seq;
967 
968 	/* here we are different, we hold the next one we expect */
969 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
970 
971 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
972 	asoc->initial_rto = m->sctp_ep.initial_rto;
973 
974 	asoc->max_init_times = m->sctp_ep.max_init_times;
975 	asoc->max_send_times = m->sctp_ep.max_send_times;
976 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
977 	asoc->free_chunk_cnt = 0;
978 
979 	asoc->iam_blocking = 0;
980 	/* ECN Nonce initialization */
981 	asoc->context = m->sctp_context;
982 	asoc->def_send = m->def_send;
983 	asoc->ecn_nonce_allowed = 0;
984 	asoc->receiver_nonce_sum = 1;
985 	asoc->nonce_sum_expect_base = 1;
986 	asoc->nonce_sum_check = 1;
987 	asoc->nonce_resync_tsn = 0;
988 	asoc->nonce_wait_for_ecne = 0;
989 	asoc->nonce_wait_tsn = 0;
990 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
991 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
992 	asoc->pr_sctp_cnt = 0;
993 	asoc->total_output_queue_size = 0;
994 
995 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
996 		struct in6pcb *inp6;
997 
998 		/* Its a V6 socket */
999 		inp6 = (struct in6pcb *)m;
1000 		asoc->ipv6_addr_legal = 1;
1001 		/* Now look at the binding flag to see if V4 will be legal */
1002 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1003 			asoc->ipv4_addr_legal = 1;
1004 		} else {
1005 			/* V4 addresses are NOT legal on the association */
1006 			asoc->ipv4_addr_legal = 0;
1007 		}
1008 	} else {
1009 		/* Its a V4 socket, no - V6 */
1010 		asoc->ipv4_addr_legal = 1;
1011 		asoc->ipv6_addr_legal = 0;
1012 	}
1013 
1014 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1015 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1016 
1017 	asoc->smallest_mtu = m->sctp_frag_point;
1018 #ifdef SCTP_PRINT_FOR_B_AND_M
1019 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1020 	    asoc->smallest_mtu);
1021 #endif
1022 	asoc->minrto = m->sctp_ep.sctp_minrto;
1023 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1024 
1025 	asoc->locked_on_sending = NULL;
1026 	asoc->stream_locked_on = 0;
1027 	asoc->ecn_echo_cnt_onq = 0;
1028 	asoc->stream_locked = 0;
1029 
1030 	asoc->send_sack = 1;
1031 
1032 	LIST_INIT(&asoc->sctp_restricted_addrs);
1033 
1034 	TAILQ_INIT(&asoc->nets);
1035 	TAILQ_INIT(&asoc->pending_reply_queue);
1036 	TAILQ_INIT(&asoc->asconf_ack_sent);
1037 	/* Setup to fill the hb random cache at first HB */
1038 	asoc->hb_random_idx = 4;
1039 
1040 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1041 
1042 	/*
1043 	 * JRS - Pick the default congestion control module based on the
1044 	 * sysctl.
1045 	 */
1046 	switch (m->sctp_ep.sctp_default_cc_module) {
1047 		/* JRS - Standard TCP congestion control */
1048 	case SCTP_CC_RFC2581:
1049 		{
1050 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1051 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1052 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1053 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1054 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1055 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1056 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1057 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1058 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1059 			break;
1060 		}
1061 		/* JRS - High Speed TCP congestion control (Floyd) */
1062 	case SCTP_CC_HSTCP:
1063 		{
1064 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1065 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1066 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1067 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1068 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1069 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1070 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1071 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1072 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1073 			break;
1074 		}
1075 		/* JRS - HTCP congestion control */
1076 	case SCTP_CC_HTCP:
1077 		{
1078 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1079 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1080 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1081 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1082 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1083 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1084 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1085 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1086 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1087 			break;
1088 		}
1089 		/* JRS - By default, use RFC2581 */
1090 	default:
1091 		{
1092 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1093 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1094 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1095 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1096 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1097 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1098 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1099 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1100 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1101 			break;
1102 		}
1103 	}
1104 
1105 	/*
1106 	 * Now the stream parameters, here we allocate space for all streams
1107 	 * that we request by default.
1108 	 */
1109 	asoc->streamoutcnt = asoc->pre_open_streams =
1110 	    m->sctp_ep.pre_open_stream_count;
1111 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1112 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1113 	    SCTP_M_STRMO);
1114 	if (asoc->strmout == NULL) {
1115 		/* big trouble no memory */
1116 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1117 		return (ENOMEM);
1118 	}
1119 	for (i = 0; i < asoc->streamoutcnt; i++) {
1120 		/*
1121 		 * inbound side must be set to 0xffff, also NOTE when we get
1122 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1123 		 * count (streamoutcnt) but first check if we sent to any of
1124 		 * the upper streams that were dropped (if some were). Those
1125 		 * that were dropped must be notified to the upper layer as
1126 		 * failed to send.
1127 		 */
1128 		asoc->strmout[i].next_sequence_sent = 0x0;
1129 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1130 		asoc->strmout[i].stream_no = i;
1131 		asoc->strmout[i].last_msg_incomplete = 0;
1132 		asoc->strmout[i].next_spoke.tqe_next = 0;
1133 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1134 	}
1135 	/* Now the mapping array */
1136 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1137 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1138 	    SCTP_M_MAP);
1139 	if (asoc->mapping_array == NULL) {
1140 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1141 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1142 		return (ENOMEM);
1143 	}
1144 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1145 	/* Now the init of the other outqueues */
1146 	TAILQ_INIT(&asoc->free_chunks);
1147 	TAILQ_INIT(&asoc->out_wheel);
1148 	TAILQ_INIT(&asoc->control_send_queue);
1149 	TAILQ_INIT(&asoc->send_queue);
1150 	TAILQ_INIT(&asoc->sent_queue);
1151 	TAILQ_INIT(&asoc->reasmqueue);
1152 	TAILQ_INIT(&asoc->resetHead);
1153 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1154 	TAILQ_INIT(&asoc->asconf_queue);
1155 	/* authentication fields */
1156 	asoc->authinfo.random = NULL;
1157 	asoc->authinfo.assoc_key = NULL;
1158 	asoc->authinfo.assoc_keyid = 0;
1159 	asoc->authinfo.recv_key = NULL;
1160 	asoc->authinfo.recv_keyid = 0;
1161 	LIST_INIT(&asoc->shared_keys);
1162 	asoc->marked_retrans = 0;
1163 	asoc->timoinit = 0;
1164 	asoc->timodata = 0;
1165 	asoc->timosack = 0;
1166 	asoc->timoshutdown = 0;
1167 	asoc->timoheartbeat = 0;
1168 	asoc->timocookie = 0;
1169 	asoc->timoshutdownack = 0;
1170 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1171 	asoc->discontinuity_time = asoc->start_time;
1172 	/*
1173 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1174 	 * freed later whe the association is freed.
1175 	 */
1176 	return (0);
1177 }
1178 
1179 int
1180 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1181 {
1182 	/* mapping array needs to grow */
1183 	uint8_t *new_array;
1184 	uint32_t new_size;
1185 
1186 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1187 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1188 	if (new_array == NULL) {
1189 		/* can't get more, forget it */
1190 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1191 		    new_size);
1192 		return (-1);
1193 	}
1194 	memset(new_array, 0, new_size);
1195 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1196 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1197 	asoc->mapping_array = new_array;
1198 	asoc->mapping_array_size = new_size;
1199 	return (0);
1200 }
1201 
1202 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Perform one iterator request: walk the global endpoint list (or a
 * single endpoint, when SCTP_ITERATOR_DO_SINGLE_INP is set), filtering
 * endpoints by pcb_flags/pcb_features and associations by asoc_state,
 * and invoke the caller-supplied callbacks (function_inp once per
 * endpoint, function_assoc once per matching association,
 * function_inp_end when an endpoint is finished, function_atend when
 * the whole iteration completes).  The iterator structure itself is
 * freed here when the walk finishes.  Runs with the global ITERATOR
 * lock held for most of the walk, taking INP and TCB locks as needed.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	/* Drop the reference taken when this iterator was scheduled. */
	if (it->inp) {
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/* Skip forward past endpoints that don't match the filters. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/* Downgrade: association walk only needs the read lock. */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	/* Either the per-inp callback asked to skip, or no assocs exist. */
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold a refcount on the tcb and a ref on the inp
			 * so neither can be freed while all locks are
			 * briefly released, then re-acquire in order.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1329 
1330 void
1331 sctp_iterator_worker(void)
1332 {
1333 	struct sctp_iterator *it = NULL;
1334 
1335 	/* This function is called with the WQ lock in place */
1336 
1337 	sctppcbinfo.iterator_running = 1;
1338 again:
1339 	it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
1340 	while (it) {
1341 		/* now lets work on this one */
1342 		TAILQ_REMOVE(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
1343 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1344 		sctp_iterator_work(it);
1345 		SCTP_IPI_ITERATOR_WQ_LOCK();
1346 		/* sa_ignore FREED_MEMORY */
1347 		it = TAILQ_FIRST(&sctppcbinfo.iteratorhead);
1348 	}
1349 	if (TAILQ_FIRST(&sctppcbinfo.iteratorhead)) {
1350 		goto again;
1351 	}
1352 	sctppcbinfo.iterator_running = 0;
1353 	return;
1354 }
1355 
1356 #endif
1357 
1358 
1359 static void
1360 sctp_handle_addr_wq(void)
1361 {
1362 	/* deal with the ADDR wq from the rtsock calls */
1363 	struct sctp_laddr *wi;
1364 	struct sctp_asconf_iterator *asc;
1365 
1366 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1367 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1368 	if (asc == NULL) {
1369 		/* Try later, no memory */
1370 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1371 		    (struct sctp_inpcb *)NULL,
1372 		    (struct sctp_tcb *)NULL,
1373 		    (struct sctp_nets *)NULL);
1374 		return;
1375 	}
1376 	LIST_INIT(&asc->list_of_work);
1377 	asc->cnt = 0;
1378 	SCTP_IPI_ITERATOR_WQ_LOCK();
1379 	wi = LIST_FIRST(&sctppcbinfo.addr_wq);
1380 	while (wi != NULL) {
1381 		LIST_REMOVE(wi, sctp_nxt_addr);
1382 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1383 		asc->cnt++;
1384 		wi = LIST_FIRST(&sctppcbinfo.addr_wq);
1385 	}
1386 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1387 	if (asc->cnt == 0) {
1388 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1389 	} else {
1390 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1391 		    sctp_asconf_iterator_stcb,
1392 		    NULL,	/* No ep end for boundall */
1393 		    SCTP_PCB_FLAGS_BOUNDALL,
1394 		    SCTP_PCB_ANY_FEATURES,
1395 		    SCTP_ASOC_ANY_STATE,
1396 		    (void *)asc, 0,
1397 		    sctp_asconf_iterator_end, NULL, 0);
1398 	}
1399 }
1400 
/*
 * NOTE(review): file-scope scratch variables written by
 * sctp_timeout_handler() (SCTP_TIMER_TYPE_SEND case).  They are not
 * protected by any lock visible here, so concurrent timer expirations
 * can race on them; presumably kept global for post-mortem inspection
 * (cur_oerr is never read in this file) — confirm before converting
 * them to locals.
 */
int retcode = 0;
int cur_oerr = 0;
1403 
1404 void
1405 sctp_timeout_handler(void *t)
1406 {
1407 	struct sctp_inpcb *inp;
1408 	struct sctp_tcb *stcb;
1409 	struct sctp_nets *net;
1410 	struct sctp_timer *tmr;
1411 
1412 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1413 	struct socket *so;
1414 
1415 #endif
1416 	int did_output;
1417 	struct sctp_iterator *it = NULL;
1418 
1419 	tmr = (struct sctp_timer *)t;
1420 	inp = (struct sctp_inpcb *)tmr->ep;
1421 	stcb = (struct sctp_tcb *)tmr->tcb;
1422 	net = (struct sctp_nets *)tmr->net;
1423 	did_output = 1;
1424 
1425 #ifdef SCTP_AUDITING_ENABLED
1426 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1427 	sctp_auditing(3, inp, stcb, net);
1428 #endif
1429 
1430 	/* sanity checks... */
1431 	if (tmr->self != (void *)tmr) {
1432 		/*
1433 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1434 		 * tmr);
1435 		 */
1436 		return;
1437 	}
1438 	tmr->stopped_from = 0xa001;
1439 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1440 		/*
1441 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1442 		 * tmr->type);
1443 		 */
1444 		return;
1445 	}
1446 	tmr->stopped_from = 0xa002;
1447 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1448 		return;
1449 	}
1450 	/* if this is an iterator timeout, get the struct and clear inp */
1451 	tmr->stopped_from = 0xa003;
1452 	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
1453 		it = (struct sctp_iterator *)inp;
1454 		inp = NULL;
1455 	}
1456 	if (inp) {
1457 		SCTP_INP_INCR_REF(inp);
1458 		if ((inp->sctp_socket == 0) &&
1459 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1460 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1461 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1462 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1463 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1464 		    ) {
1465 			SCTP_INP_DECR_REF(inp);
1466 			return;
1467 		}
1468 	}
1469 	tmr->stopped_from = 0xa004;
1470 	if (stcb) {
1471 		atomic_add_int(&stcb->asoc.refcnt, 1);
1472 		if (stcb->asoc.state == 0) {
1473 			atomic_add_int(&stcb->asoc.refcnt, -1);
1474 			if (inp) {
1475 				SCTP_INP_DECR_REF(inp);
1476 			}
1477 			return;
1478 		}
1479 	}
1480 	tmr->stopped_from = 0xa005;
1481 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
1482 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1483 		if (inp) {
1484 			SCTP_INP_DECR_REF(inp);
1485 		}
1486 		if (stcb) {
1487 			atomic_add_int(&stcb->asoc.refcnt, -1);
1488 		}
1489 		return;
1490 	}
1491 	tmr->stopped_from = 0xa006;
1492 
1493 	if (stcb) {
1494 		SCTP_TCB_LOCK(stcb);
1495 		atomic_add_int(&stcb->asoc.refcnt, -1);
1496 		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
1497 		    ((stcb->asoc.state == 0) ||
1498 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1499 			SCTP_TCB_UNLOCK(stcb);
1500 			if (inp) {
1501 				SCTP_INP_DECR_REF(inp);
1502 			}
1503 			return;
1504 		}
1505 	}
1506 	/* record in stopped what t-o occured */
1507 	tmr->stopped_from = tmr->type;
1508 
1509 	/* mark as being serviced now */
1510 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1511 		/*
1512 		 * Callout has been rescheduled.
1513 		 */
1514 		goto get_out;
1515 	}
1516 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1517 		/*
1518 		 * Not active, so no action.
1519 		 */
1520 		goto get_out;
1521 	}
1522 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1523 
1524 	/* call the handler for the appropriate timer type */
1525 	switch (tmr->type) {
1526 	case SCTP_TIMER_TYPE_ZERO_COPY:
1527 		if (inp == NULL) {
1528 			break;
1529 		}
1530 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1531 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1532 		}
1533 		break;
1534 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1535 		if (inp == NULL) {
1536 			break;
1537 		}
1538 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1539 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1540 		}
1541 		break;
1542 	case SCTP_TIMER_TYPE_ADDR_WQ:
1543 		sctp_handle_addr_wq();
1544 		break;
1545 	case SCTP_TIMER_TYPE_ITERATOR:
1546 		SCTP_STAT_INCR(sctps_timoiterator);
1547 		sctp_iterator_timer(it);
1548 		break;
1549 	case SCTP_TIMER_TYPE_SEND:
1550 		if ((stcb == NULL) || (inp == NULL)) {
1551 			break;
1552 		}
1553 		SCTP_STAT_INCR(sctps_timodata);
1554 		stcb->asoc.timodata++;
1555 		stcb->asoc.num_send_timers_up--;
1556 		if (stcb->asoc.num_send_timers_up < 0) {
1557 			stcb->asoc.num_send_timers_up = 0;
1558 		}
1559 		SCTP_TCB_LOCK_ASSERT(stcb);
1560 		cur_oerr = stcb->asoc.overall_error_count;
1561 		retcode = sctp_t3rxt_timer(inp, stcb, net);
1562 		if (retcode) {
1563 			/* no need to unlock on tcb its gone */
1564 
1565 			goto out_decr;
1566 		}
1567 		SCTP_TCB_LOCK_ASSERT(stcb);
1568 #ifdef SCTP_AUDITING_ENABLED
1569 		sctp_auditing(4, inp, stcb, net);
1570 #endif
1571 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1572 		if ((stcb->asoc.num_send_timers_up == 0) &&
1573 		    (stcb->asoc.sent_queue_cnt > 0)
1574 		    ) {
1575 			struct sctp_tmit_chunk *chk;
1576 
1577 			/*
1578 			 * safeguard. If there on some on the sent queue
1579 			 * somewhere but no timers running something is
1580 			 * wrong... so we start a timer on the first chunk
1581 			 * on the send queue on whatever net it is sent to.
1582 			 */
1583 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1584 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1585 			    chk->whoTo);
1586 		}
1587 		break;
1588 	case SCTP_TIMER_TYPE_INIT:
1589 		if ((stcb == NULL) || (inp == NULL)) {
1590 			break;
1591 		}
1592 		SCTP_STAT_INCR(sctps_timoinit);
1593 		stcb->asoc.timoinit++;
1594 		if (sctp_t1init_timer(inp, stcb, net)) {
1595 			/* no need to unlock on tcb its gone */
1596 			goto out_decr;
1597 		}
1598 		/* We do output but not here */
1599 		did_output = 0;
1600 		break;
1601 	case SCTP_TIMER_TYPE_RECV:
1602 		if ((stcb == NULL) || (inp == NULL)) {
1603 			break;
1604 		} {
1605 			int abort_flag;
1606 
1607 			SCTP_STAT_INCR(sctps_timosack);
1608 			stcb->asoc.timosack++;
1609 			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
1610 				sctp_sack_check(stcb, 0, 0, &abort_flag);
1611 			sctp_send_sack(stcb);
1612 		}
1613 #ifdef SCTP_AUDITING_ENABLED
1614 		sctp_auditing(4, inp, stcb, net);
1615 #endif
1616 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1617 		break;
1618 	case SCTP_TIMER_TYPE_SHUTDOWN:
1619 		if ((stcb == NULL) || (inp == NULL)) {
1620 			break;
1621 		}
1622 		if (sctp_shutdown_timer(inp, stcb, net)) {
1623 			/* no need to unlock on tcb its gone */
1624 			goto out_decr;
1625 		}
1626 		SCTP_STAT_INCR(sctps_timoshutdown);
1627 		stcb->asoc.timoshutdown++;
1628 #ifdef SCTP_AUDITING_ENABLED
1629 		sctp_auditing(4, inp, stcb, net);
1630 #endif
1631 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1632 		break;
1633 	case SCTP_TIMER_TYPE_HEARTBEAT:
1634 		{
1635 			struct sctp_nets *lnet;
1636 			int cnt_of_unconf = 0;
1637 
1638 			if ((stcb == NULL) || (inp == NULL)) {
1639 				break;
1640 			}
1641 			SCTP_STAT_INCR(sctps_timoheartbeat);
1642 			stcb->asoc.timoheartbeat++;
1643 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1644 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1645 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1646 					cnt_of_unconf++;
1647 				}
1648 			}
1649 			if (cnt_of_unconf == 0) {
1650 				if (sctp_heartbeat_timer(inp, stcb, lnet,
1651 				    cnt_of_unconf)) {
1652 					/* no need to unlock on tcb its gone */
1653 					goto out_decr;
1654 				}
1655 			}
1656 #ifdef SCTP_AUDITING_ENABLED
1657 			sctp_auditing(4, inp, stcb, lnet);
1658 #endif
1659 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
1660 			    stcb->sctp_ep, stcb, lnet);
1661 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1662 		}
1663 		break;
1664 	case SCTP_TIMER_TYPE_COOKIE:
1665 		if ((stcb == NULL) || (inp == NULL)) {
1666 			break;
1667 		}
1668 		if (sctp_cookie_timer(inp, stcb, net)) {
1669 			/* no need to unlock on tcb its gone */
1670 			goto out_decr;
1671 		}
1672 		SCTP_STAT_INCR(sctps_timocookie);
1673 		stcb->asoc.timocookie++;
1674 #ifdef SCTP_AUDITING_ENABLED
1675 		sctp_auditing(4, inp, stcb, net);
1676 #endif
1677 		/*
1678 		 * We consider T3 and Cookie timer pretty much the same with
1679 		 * respect to where from in chunk_output.
1680 		 */
1681 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1682 		break;
1683 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1684 		{
1685 			struct timeval tv;
1686 			int i, secret;
1687 
1688 			if (inp == NULL) {
1689 				break;
1690 			}
1691 			SCTP_STAT_INCR(sctps_timosecret);
1692 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1693 			SCTP_INP_WLOCK(inp);
1694 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1695 			inp->sctp_ep.last_secret_number =
1696 			    inp->sctp_ep.current_secret_number;
1697 			inp->sctp_ep.current_secret_number++;
1698 			if (inp->sctp_ep.current_secret_number >=
1699 			    SCTP_HOW_MANY_SECRETS) {
1700 				inp->sctp_ep.current_secret_number = 0;
1701 			}
1702 			secret = (int)inp->sctp_ep.current_secret_number;
1703 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1704 				inp->sctp_ep.secret_key[secret][i] =
1705 				    sctp_select_initial_TSN(&inp->sctp_ep);
1706 			}
1707 			SCTP_INP_WUNLOCK(inp);
1708 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1709 		}
1710 		did_output = 0;
1711 		break;
1712 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1713 		if ((stcb == NULL) || (inp == NULL)) {
1714 			break;
1715 		}
1716 		SCTP_STAT_INCR(sctps_timopathmtu);
1717 		sctp_pathmtu_timer(inp, stcb, net);
1718 		did_output = 0;
1719 		break;
1720 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1721 		if ((stcb == NULL) || (inp == NULL)) {
1722 			break;
1723 		}
1724 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1725 			/* no need to unlock on tcb its gone */
1726 			goto out_decr;
1727 		}
1728 		SCTP_STAT_INCR(sctps_timoshutdownack);
1729 		stcb->asoc.timoshutdownack++;
1730 #ifdef SCTP_AUDITING_ENABLED
1731 		sctp_auditing(4, inp, stcb, net);
1732 #endif
1733 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1734 		break;
1735 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1736 		if ((stcb == NULL) || (inp == NULL)) {
1737 			break;
1738 		}
1739 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1740 		sctp_abort_an_association(inp, stcb,
1741 		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
1742 		/* no need to unlock on tcb its gone */
1743 		goto out_decr;
1744 
1745 	case SCTP_TIMER_TYPE_STRRESET:
1746 		if ((stcb == NULL) || (inp == NULL)) {
1747 			break;
1748 		}
1749 		if (sctp_strreset_timer(inp, stcb, net)) {
1750 			/* no need to unlock on tcb its gone */
1751 			goto out_decr;
1752 		}
1753 		SCTP_STAT_INCR(sctps_timostrmrst);
1754 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1755 		break;
1756 	case SCTP_TIMER_TYPE_EARLYFR:
1757 		/* Need to do FR of things for net */
1758 		if ((stcb == NULL) || (inp == NULL)) {
1759 			break;
1760 		}
1761 		SCTP_STAT_INCR(sctps_timoearlyfr);
1762 		sctp_early_fr_timer(inp, stcb, net);
1763 		break;
1764 	case SCTP_TIMER_TYPE_ASCONF:
1765 		if ((stcb == NULL) || (inp == NULL)) {
1766 			break;
1767 		}
1768 		if (sctp_asconf_timer(inp, stcb, net)) {
1769 			/* no need to unlock on tcb its gone */
1770 			goto out_decr;
1771 		}
1772 		SCTP_STAT_INCR(sctps_timoasconf);
1773 #ifdef SCTP_AUDITING_ENABLED
1774 		sctp_auditing(4, inp, stcb, net);
1775 #endif
1776 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1777 		break;
1778 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1779 		if ((stcb == NULL) || (inp == NULL)) {
1780 			break;
1781 		}
1782 		sctp_delete_prim_timer(inp, stcb, net);
1783 		SCTP_STAT_INCR(sctps_timodelprim);
1784 		break;
1785 
1786 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1787 		if ((stcb == NULL) || (inp == NULL)) {
1788 			break;
1789 		}
1790 		SCTP_STAT_INCR(sctps_timoautoclose);
1791 		sctp_autoclose_timer(inp, stcb, net);
1792 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1793 		did_output = 0;
1794 		break;
1795 	case SCTP_TIMER_TYPE_ASOCKILL:
1796 		if ((stcb == NULL) || (inp == NULL)) {
1797 			break;
1798 		}
1799 		SCTP_STAT_INCR(sctps_timoassockill);
1800 		/* Can we free it yet? */
1801 		SCTP_INP_DECR_REF(inp);
1802 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1803 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1804 		so = SCTP_INP_SO(inp);
1805 		atomic_add_int(&stcb->asoc.refcnt, 1);
1806 		SCTP_TCB_UNLOCK(stcb);
1807 		SCTP_SOCKET_LOCK(so, 1);
1808 		SCTP_TCB_LOCK(stcb);
1809 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1810 #endif
1811 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1812 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1813 		SCTP_SOCKET_UNLOCK(so, 1);
1814 #endif
1815 		/*
1816 		 * free asoc, always unlocks (or destroy's) so prevent
1817 		 * duplicate unlock or unlock of a free mtx :-0
1818 		 */
1819 		stcb = NULL;
1820 		goto out_no_decr;
1821 	case SCTP_TIMER_TYPE_INPKILL:
1822 		SCTP_STAT_INCR(sctps_timoinpkill);
1823 		if (inp == NULL) {
1824 			break;
1825 		}
1826 		/*
1827 		 * special case, take away our increment since WE are the
1828 		 * killer
1829 		 */
1830 		SCTP_INP_DECR_REF(inp);
1831 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1832 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1833 		    SCTP_CALLED_DIRECTLY_NOCMPSET);
1834 		goto out_no_decr;
1835 	default:
1836 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1837 		    tmr->type);
1838 		break;
1839 	};
1840 #ifdef SCTP_AUDITING_ENABLED
1841 	sctp_audit_log(0xF1, (uint8_t) tmr->type);
1842 	if (inp)
1843 		sctp_auditing(5, inp, stcb, net);
1844 #endif
1845 	if ((did_output) && stcb) {
1846 		/*
1847 		 * Now we need to clean up the control chunk chain if an
1848 		 * ECNE is on it. It must be marked as UNSENT again so next
1849 		 * call will continue to send it until such time that we get
1850 		 * a CWR, to remove it. It is, however, less likely that we
1851 		 * will find a ecn echo on the chain though.
1852 		 */
1853 		sctp_fix_ecn_echo(&stcb->asoc);
1854 	}
1855 get_out:
1856 	if (stcb) {
1857 		SCTP_TCB_UNLOCK(stcb);
1858 	}
1859 out_decr:
1860 	if (inp) {
1861 		SCTP_INP_DECR_REF(inp);
1862 	}
1863 out_no_decr:
1864 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
1865 	    tmr->type);
1866 	if (inp) {
1867 	}
1868 }
1869 
1870 void
1871 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1872     struct sctp_nets *net)
1873 {
1874 	int to_ticks;
1875 	struct sctp_timer *tmr;
1876 
1877 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1878 		return;
1879 
1880 	to_ticks = 0;
1881 
1882 	tmr = NULL;
1883 	if (stcb) {
1884 		SCTP_TCB_LOCK_ASSERT(stcb);
1885 	}
1886 	switch (t_type) {
1887 	case SCTP_TIMER_TYPE_ZERO_COPY:
1888 		tmr = &inp->sctp_ep.zero_copy_timer;
1889 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1890 		break;
1891 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1892 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1893 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1894 		break;
1895 	case SCTP_TIMER_TYPE_ADDR_WQ:
1896 		/* Only 1 tick away :-) */
1897 		tmr = &sctppcbinfo.addr_wq_timer;
1898 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1899 		break;
1900 	case SCTP_TIMER_TYPE_ITERATOR:
1901 		{
1902 			struct sctp_iterator *it;
1903 
1904 			it = (struct sctp_iterator *)inp;
1905 			tmr = &it->tmr;
1906 			to_ticks = SCTP_ITERATOR_TICKS;
1907 		}
1908 		break;
1909 	case SCTP_TIMER_TYPE_SEND:
1910 		/* Here we use the RTO timer */
1911 		{
1912 			int rto_val;
1913 
1914 			if ((stcb == NULL) || (net == NULL)) {
1915 				return;
1916 			}
1917 			tmr = &net->rxt_timer;
1918 			if (net->RTO == 0) {
1919 				rto_val = stcb->asoc.initial_rto;
1920 			} else {
1921 				rto_val = net->RTO;
1922 			}
1923 			to_ticks = MSEC_TO_TICKS(rto_val);
1924 		}
1925 		break;
1926 	case SCTP_TIMER_TYPE_INIT:
1927 		/*
1928 		 * Here we use the INIT timer default usually about 1
1929 		 * minute.
1930 		 */
1931 		if ((stcb == NULL) || (net == NULL)) {
1932 			return;
1933 		}
1934 		tmr = &net->rxt_timer;
1935 		if (net->RTO == 0) {
1936 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1937 		} else {
1938 			to_ticks = MSEC_TO_TICKS(net->RTO);
1939 		}
1940 		break;
1941 	case SCTP_TIMER_TYPE_RECV:
1942 		/*
1943 		 * Here we use the Delayed-Ack timer value from the inp
1944 		 * ususually about 200ms.
1945 		 */
1946 		if (stcb == NULL) {
1947 			return;
1948 		}
1949 		tmr = &stcb->asoc.dack_timer;
1950 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1951 		break;
1952 	case SCTP_TIMER_TYPE_SHUTDOWN:
1953 		/* Here we use the RTO of the destination. */
1954 		if ((stcb == NULL) || (net == NULL)) {
1955 			return;
1956 		}
1957 		if (net->RTO == 0) {
1958 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1959 		} else {
1960 			to_ticks = MSEC_TO_TICKS(net->RTO);
1961 		}
1962 		tmr = &net->rxt_timer;
1963 		break;
1964 	case SCTP_TIMER_TYPE_HEARTBEAT:
1965 		/*
1966 		 * the net is used here so that we can add in the RTO. Even
1967 		 * though we use a different timer. We also add the HB timer
1968 		 * PLUS a random jitter.
1969 		 */
1970 		if ((inp == NULL) || (stcb == NULL)) {
1971 			return;
1972 		} else {
1973 			uint32_t rndval;
1974 			uint8_t this_random;
1975 			int cnt_of_unconf = 0;
1976 			struct sctp_nets *lnet;
1977 
1978 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1979 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1980 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1981 					cnt_of_unconf++;
1982 				}
1983 			}
1984 			if (cnt_of_unconf) {
1985 				net = lnet = NULL;
1986 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
1987 			}
1988 			if (stcb->asoc.hb_random_idx > 3) {
1989 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1990 				memcpy(stcb->asoc.hb_random_values, &rndval,
1991 				    sizeof(stcb->asoc.hb_random_values));
1992 				stcb->asoc.hb_random_idx = 0;
1993 			}
1994 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
1995 			stcb->asoc.hb_random_idx++;
1996 			stcb->asoc.hb_ect_randombit = 0;
1997 			/*
1998 			 * this_random will be 0 - 256 ms RTO is in ms.
1999 			 */
2000 			if ((stcb->asoc.hb_is_disabled) &&
2001 			    (cnt_of_unconf == 0)) {
2002 				return;
2003 			}
2004 			if (net) {
2005 				int delay;
2006 
2007 				delay = stcb->asoc.heart_beat_delay;
2008 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2009 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2010 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2011 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2012 						delay = 0;
2013 					}
2014 				}
2015 				if (net->RTO == 0) {
2016 					/* Never been checked */
2017 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2018 				} else {
2019 					/* set rto_val to the ms */
2020 					to_ticks = delay + net->RTO + this_random;
2021 				}
2022 			} else {
2023 				if (cnt_of_unconf) {
2024 					to_ticks = this_random + stcb->asoc.initial_rto;
2025 				} else {
2026 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2027 				}
2028 			}
2029 			/*
2030 			 * Now we must convert the to_ticks that are now in
2031 			 * ms to ticks.
2032 			 */
2033 			to_ticks = MSEC_TO_TICKS(to_ticks);
2034 			tmr = &stcb->asoc.hb_timer;
2035 		}
2036 		break;
2037 	case SCTP_TIMER_TYPE_COOKIE:
2038 		/*
2039 		 * Here we can use the RTO timer from the network since one
2040 		 * RTT was compelete. If a retran happened then we will be
2041 		 * using the RTO initial value.
2042 		 */
2043 		if ((stcb == NULL) || (net == NULL)) {
2044 			return;
2045 		}
2046 		if (net->RTO == 0) {
2047 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2048 		} else {
2049 			to_ticks = MSEC_TO_TICKS(net->RTO);
2050 		}
2051 		tmr = &net->rxt_timer;
2052 		break;
2053 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2054 		/*
2055 		 * nothing needed but the endpoint here ususually about 60
2056 		 * minutes.
2057 		 */
2058 		if (inp == NULL) {
2059 			return;
2060 		}
2061 		tmr = &inp->sctp_ep.signature_change;
2062 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2063 		break;
2064 	case SCTP_TIMER_TYPE_ASOCKILL:
2065 		if (stcb == NULL) {
2066 			return;
2067 		}
2068 		tmr = &stcb->asoc.strreset_timer;
2069 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2070 		break;
2071 	case SCTP_TIMER_TYPE_INPKILL:
2072 		/*
2073 		 * The inp is setup to die. We re-use the signature_chage
2074 		 * timer since that has stopped and we are in the GONE
2075 		 * state.
2076 		 */
2077 		if (inp == NULL) {
2078 			return;
2079 		}
2080 		tmr = &inp->sctp_ep.signature_change;
2081 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2082 		break;
2083 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2084 		/*
2085 		 * Here we use the value found in the EP for PMTU ususually
2086 		 * about 10 minutes.
2087 		 */
2088 		if ((stcb == NULL) || (inp == NULL)) {
2089 			return;
2090 		}
2091 		if (net == NULL) {
2092 			return;
2093 		}
2094 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2095 		tmr = &net->pmtu_timer;
2096 		break;
2097 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2098 		/* Here we use the RTO of the destination */
2099 		if ((stcb == NULL) || (net == NULL)) {
2100 			return;
2101 		}
2102 		if (net->RTO == 0) {
2103 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2104 		} else {
2105 			to_ticks = MSEC_TO_TICKS(net->RTO);
2106 		}
2107 		tmr = &net->rxt_timer;
2108 		break;
2109 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2110 		/*
2111 		 * Here we use the endpoints shutdown guard timer usually
2112 		 * about 3 minutes.
2113 		 */
2114 		if ((inp == NULL) || (stcb == NULL)) {
2115 			return;
2116 		}
2117 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2118 		tmr = &stcb->asoc.shut_guard_timer;
2119 		break;
2120 	case SCTP_TIMER_TYPE_STRRESET:
2121 		/*
2122 		 * Here the timer comes from the stcb but its value is from
2123 		 * the net's RTO.
2124 		 */
2125 		if ((stcb == NULL) || (net == NULL)) {
2126 			return;
2127 		}
2128 		if (net->RTO == 0) {
2129 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2130 		} else {
2131 			to_ticks = MSEC_TO_TICKS(net->RTO);
2132 		}
2133 		tmr = &stcb->asoc.strreset_timer;
2134 		break;
2135 
2136 	case SCTP_TIMER_TYPE_EARLYFR:
2137 		{
2138 			unsigned int msec;
2139 
2140 			if ((stcb == NULL) || (net == NULL)) {
2141 				return;
2142 			}
2143 			if (net->flight_size > net->cwnd) {
2144 				/* no need to start */
2145 				return;
2146 			}
2147 			SCTP_STAT_INCR(sctps_earlyfrstart);
2148 			if (net->lastsa == 0) {
2149 				/* Hmm no rtt estimate yet? */
2150 				msec = stcb->asoc.initial_rto >> 2;
2151 			} else {
2152 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2153 			}
2154 			if (msec < sctp_early_fr_msec) {
2155 				msec = sctp_early_fr_msec;
2156 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2157 					msec = SCTP_MINFR_MSEC_FLOOR;
2158 				}
2159 			}
2160 			to_ticks = MSEC_TO_TICKS(msec);
2161 			tmr = &net->fr_timer;
2162 		}
2163 		break;
2164 	case SCTP_TIMER_TYPE_ASCONF:
2165 		/*
2166 		 * Here the timer comes from the stcb but its value is from
2167 		 * the net's RTO.
2168 		 */
2169 		if ((stcb == NULL) || (net == NULL)) {
2170 			return;
2171 		}
2172 		if (net->RTO == 0) {
2173 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2174 		} else {
2175 			to_ticks = MSEC_TO_TICKS(net->RTO);
2176 		}
2177 		tmr = &stcb->asoc.asconf_timer;
2178 		break;
2179 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2180 		if ((stcb == NULL) || (net != NULL)) {
2181 			return;
2182 		}
2183 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2184 		tmr = &stcb->asoc.delete_prim_timer;
2185 		break;
2186 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2187 		if (stcb == NULL) {
2188 			return;
2189 		}
2190 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2191 			/*
2192 			 * Really an error since stcb is NOT set to
2193 			 * autoclose
2194 			 */
2195 			return;
2196 		}
2197 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2198 		tmr = &stcb->asoc.autoclose_timer;
2199 		break;
2200 	default:
2201 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2202 		    __FUNCTION__, t_type);
2203 		return;
2204 		break;
2205 	};
2206 	if ((to_ticks <= 0) || (tmr == NULL)) {
2207 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2208 		    __FUNCTION__, t_type, to_ticks, tmr);
2209 		return;
2210 	}
2211 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2212 		/*
2213 		 * we do NOT allow you to have it already running. if it is
2214 		 * we leave the current one up unchanged
2215 		 */
2216 		return;
2217 	}
2218 	/* At this point we can proceed */
2219 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2220 		stcb->asoc.num_send_timers_up++;
2221 	}
2222 	tmr->stopped_from = 0;
2223 	tmr->type = t_type;
2224 	tmr->ep = (void *)inp;
2225 	tmr->tcb = (void *)stcb;
2226 	tmr->net = (void *)net;
2227 	tmr->self = (void *)tmr;
2228 	tmr->ticks = sctp_get_tick_count();
2229 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2230 	return;
2231 }
2232 
/*
 * Stop (cancel) the timer of type 't_type' for the given
 * endpoint/association/destination.  'from' records the caller's
 * location for post-mortem debugging (saved in tmr->stopped_from).
 * Joint-use timers (e.g. net->rxt_timer) are only stopped when they
 * are currently running as 't_type'; otherwise the call is a no-op.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Every type except the address work-queue needs an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Locate the sctp_timer structure backing this timer type. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &sctppcbinfo.addr_wq_timer;
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			struct sctp_iterator *it;

			/* For the iterator, 'inp' actually carries the iterator. */
			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the association's count of pending SEND timers accurate. */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2404 
2405 #ifdef SCTP_USE_ADLER32
#ifndef SCTP_ADLER32_BASE
/* Adler-32 modulus: largest prime smaller than 2^16 (fallback define). */
#define SCTP_ADLER32_BASE 65521
#endif

/*
 * Incrementally update an Adler-32 checksum over buf[0..len-1].
 * 'adler' is the running checksum (use 1 to start a fresh computation);
 * the updated checksum ((s2 << 16) | s1) is returned.  The buffer is
 * read-only, hence the const qualifier.
 */
static uint32_t
update_adler32(uint32_t adler, const uint8_t *buf, int32_t len)
{
	uint32_t s1 = adler & 0xffff;
	uint32_t s2 = (adler >> 16) & 0xffff;
	int32_t n;

	for (n = 0; n < len; n++, buf++) {
		/*
		 * s1 = (s1 + *buf) % BASE.  A single conditional subtract
		 * is enough (and cheaper than a divide) because *buf is at
		 * most 255, so s1 can exceed BASE by less than BASE.
		 */
		s1 = (s1 + *buf);
		if (s1 >= SCTP_ADLER32_BASE) {
			s1 -= SCTP_ADLER32_BASE;
		}
		/*
		 * s2 = (s2 + s1) % BASE.  Worst case is (BASE-1)+(BASE-1),
		 * i.e. < 2*BASE, so one subtract again suffices.
		 */
		s2 = (s2 + s1);
		if (s2 >= SCTP_ADLER32_BASE) {
			s2 -= SCTP_ADLER32_BASE;
		}
	}
	/* Return the adler32 of the bytes buf[0..len-1] */
	return ((s2 << 16) + s1);
}
2443 
2444 #endif
2445 
2446 
2447 uint32_t
2448 sctp_calculate_len(struct mbuf *m)
2449 {
2450 	uint32_t tlen = 0;
2451 	struct mbuf *at;
2452 
2453 	at = m;
2454 	while (at) {
2455 		tlen += SCTP_BUF_LEN(at);
2456 		at = SCTP_BUF_NEXT(at);
2457 	}
2458 	return (tlen);
2459 }
2460 
2461 #if defined(SCTP_WITH_NO_CSUM)
2462 
2463 uint32_t
2464 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2465 {
2466 	/*
2467 	 * given a mbuf chain with a packetheader offset by 'offset'
2468 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2469 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2470 	 * has a side bonus as it will calculate the total length of the
2471 	 * mbuf chain. Note: if offset is greater than the total mbuf
2472 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2473 	 */
2474 	if (pktlen == NULL)
2475 		return (0);
2476 	*pktlen = sctp_calculate_len(m);
2477 	return (0);
2478 }
2479 
2480 #elif defined(SCTP_USE_INCHKSUM)
2481 
2482 #include <machine/in_cksum.h>
2483 
2484 uint32_t
2485 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2486 {
2487 	/*
2488 	 * given a mbuf chain with a packetheader offset by 'offset'
2489 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2490 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2491 	 * has a side bonus as it will calculate the total length of the
2492 	 * mbuf chain. Note: if offset is greater than the total mbuf
2493 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2494 	 */
2495 	int32_t tlen = 0;
2496 	struct mbuf *at;
2497 	uint32_t the_sum, retsum;
2498 
2499 	at = m;
2500 	while (at) {
2501 		tlen += SCTP_BUF_LEN(at);
2502 		at = SCTP_BUF_NEXT(at);
2503 	}
2504 	the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset));
2505 	if (pktlen != NULL)
2506 		*pktlen = (tlen - offset);
2507 	retsum = htons(the_sum);
2508 	return (the_sum);
2509 }
2510 
2511 #else
2512 
uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through the chain
	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
	 * has a side bonus as it will calculate the total length of the
	 * mbuf chain. Note: if offset is greater than the total mbuf
	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
	 */
	int32_t tlen = 0;

#ifdef SCTP_USE_ADLER32
	uint32_t base = 1L;	/* Adler-32 initial value */

#else
	uint32_t base = 0xffffffff;	/* CRC-32c initial value */

#endif
	struct mbuf *at;

	at = m;
	/* find the correct mbuf and offset into mbuf */
	while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) {
		offset -= SCTP_BUF_LEN(at);	/* update remaining offset
						 * left */
		at = SCTP_BUF_NEXT(at);
	}
	/* Fold every mbuf's data into the running checksum. */
	while (at != NULL) {
		if ((SCTP_BUF_LEN(at) - offset) > 0) {
#ifdef SCTP_USE_ADLER32
			base = update_adler32(base,
			    (unsigned char *)(SCTP_BUF_AT(at, offset)),
			    (unsigned int)(SCTP_BUF_LEN(at) - offset));
#else
			if ((SCTP_BUF_LEN(at) - offset) < 4) {
				/* Use old method if less than 4 bytes */
				base = old_update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			} else {
				base = update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			}
#endif
			tlen += SCTP_BUF_LEN(at) - offset;
			/* we only offset once into the first mbuf */
		}
		/* Clear (or consume) the residual offset after the first mbuf. */
		if (offset) {
			if (offset < (uint32_t) SCTP_BUF_LEN(at))
				offset = 0;
			else
				offset -= SCTP_BUF_LEN(at);
		}
		at = SCTP_BUF_NEXT(at);
	}
	if (pktlen != NULL) {
		*pktlen = tlen;
	}
#ifdef SCTP_USE_ADLER32
	/* Adler32 */
	base = htonl(base);
#else
	/* CRC-32c */
	base = sctp_csum_finalize(base);
#endif
	return (base);
}
2583 
2584 
2585 #endif
2586 
2587 void
2588 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2589     struct sctp_association *asoc, uint32_t mtu)
2590 {
2591 	/*
2592 	 * Reset the P-MTU size on this association, this involves changing
2593 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2594 	 * allow the DF flag to be cleared.
2595 	 */
2596 	struct sctp_tmit_chunk *chk;
2597 	unsigned int eff_mtu, ovh;
2598 
2599 #ifdef SCTP_PRINT_FOR_B_AND_M
2600 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2601 	    inp, asoc, mtu);
2602 #endif
2603 	asoc->smallest_mtu = mtu;
2604 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2605 		ovh = SCTP_MIN_OVERHEAD;
2606 	} else {
2607 		ovh = SCTP_MIN_V4_OVERHEAD;
2608 	}
2609 	eff_mtu = mtu - ovh;
2610 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2611 
2612 		if (chk->send_size > eff_mtu) {
2613 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2614 		}
2615 	}
2616 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2617 		if (chk->send_size > eff_mtu) {
2618 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2619 		}
2620 	}
2621 }
2622 
2623 
2624 /*
2625  * given an association and starting time of the current RTT period return
2626  * RTO in number of msecs net should point to the current network
2627  */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	/*
	 * 'safe' tells us whether 'told' may be misaligned (on
	 * strict-alignment platforms): if unsafe, work on a local
	 * aligned copy instead of the caller's struct.
	 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the elapsed time (now - *old) in milliseconds */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		/* Smoothed estimators exist: fold the new sample in. */
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (sctp_logging_level & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			/* |sample - srtt| feeds the variance estimator */
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (sctp_logging_level & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR (in ms) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2753 
2754 /*
2755  * return a pointer to a contiguous piece of data from the given mbuf chain
2756  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2757  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2758  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2759  */
2760 caddr_t
2761 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2762 {
2763 	uint32_t count;
2764 	uint8_t *ptr;
2765 
2766 	ptr = in_ptr;
2767 	if ((off < 0) || (len <= 0))
2768 		return (NULL);
2769 
2770 	/* find the desired start location */
2771 	while ((m != NULL) && (off > 0)) {
2772 		if (off < SCTP_BUF_LEN(m))
2773 			break;
2774 		off -= SCTP_BUF_LEN(m);
2775 		m = SCTP_BUF_NEXT(m);
2776 	}
2777 	if (m == NULL)
2778 		return (NULL);
2779 
2780 	/* is the current mbuf large enough (eg. contiguous)? */
2781 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2782 		return (mtod(m, caddr_t)+off);
2783 	} else {
2784 		/* else, it spans more than one mbuf, so save a temp copy... */
2785 		while ((m != NULL) && (len > 0)) {
2786 			count = min(SCTP_BUF_LEN(m) - off, len);
2787 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2788 			len -= count;
2789 			ptr += count;
2790 			off = 0;
2791 			m = SCTP_BUF_NEXT(m);
2792 		}
2793 		if ((m == NULL) && (len > 0))
2794 			return (NULL);
2795 		else
2796 			return ((caddr_t)in_ptr);
2797 	}
2798 }
2799 
2800 
2801 
2802 struct sctp_paramhdr *
2803 sctp_get_next_param(struct mbuf *m,
2804     int offset,
2805     struct sctp_paramhdr *pull,
2806     int pull_limit)
2807 {
2808 	/* This just provides a typed signature to Peter's Pull routine */
2809 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2810 	    (uint8_t *) pull));
2811 }
2812 
2813 
2814 int
2815 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2816 {
2817 	/*
2818 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2819 	 * padlen is > 3 this routine will fail.
2820 	 */
2821 	uint8_t *dp;
2822 	int i;
2823 
2824 	if (padlen > 3) {
2825 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2826 		return (ENOBUFS);
2827 	}
2828 	if (padlen <= M_TRAILINGSPACE(m)) {
2829 		/*
2830 		 * The easy way. We hope the majority of the time we hit
2831 		 * here :)
2832 		 */
2833 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2834 		SCTP_BUF_LEN(m) += padlen;
2835 	} else {
2836 		/* Hard way we must grow the mbuf */
2837 		struct mbuf *tmp;
2838 
2839 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2840 		if (tmp == NULL) {
2841 			/* Out of space GAK! we are in big trouble. */
2842 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2843 			return (ENOSPC);
2844 		}
2845 		/* setup and insert in middle */
2846 		SCTP_BUF_LEN(tmp) = padlen;
2847 		SCTP_BUF_NEXT(tmp) = NULL;
2848 		SCTP_BUF_NEXT(m) = tmp;
2849 		dp = mtod(tmp, uint8_t *);
2850 	}
2851 	/* zero out the pad */
2852 	for (i = 0; i < padlen; i++) {
2853 		*dp = 0;
2854 		dp++;
2855 	}
2856 	return (0);
2857 }
2858 
2859 int
2860 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2861 {
2862 	/* find the last mbuf in chain and pad it */
2863 	struct mbuf *m_at;
2864 
2865 	m_at = m;
2866 	if (last_mbuf) {
2867 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2868 	} else {
2869 		while (m_at) {
2870 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2871 				return (sctp_add_pad_tombuf(m_at, padval));
2872 			}
2873 			m_at = SCTP_BUF_NEXT(m_at);
2874 		}
2875 	}
2876 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2877 	return (EFAULT);
2878 }
2879 
/* Debug counter: bumped each time an assoc-change event wakes socket sleepers. */
int sctp_asoc_change_wake = 0;
2881 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification ('event', e.g. SCTP_COMM_LOST)
 * with error code 'error' to the application via the socket receive queue,
 * waking any sleepers when the association is going away.  'so_locked'
 * tells us whether the caller already holds the socket lock (only used on
 * platforms that require the socket-lock dance below).
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * First if we are are going down dump everything we can to the
	 * socket rcv queue.
	 */

	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
	    ) {
		/* If the socket is gone we are out of here */
		return;
	}
	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Take the socket lock; hold a refcount across the
			 * TCB unlock/relock so the assoc cannot disappear.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* Socket was closed while we slept; bail. */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	/* Build the notification payload in a fresh mbuf. */
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* Wrap the mbuf in a read-queue entry and hand it to the socket. */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3011 
3012 static void
3013 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
3014     struct sockaddr *sa, uint32_t error)
3015 {
3016 	struct mbuf *m_notify;
3017 	struct sctp_paddr_change *spc;
3018 	struct sctp_queued_to_read *control;
3019 
3020 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)))
3021 		/* event not enabled */
3022 		return;
3023 
3024 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
3025 	if (m_notify == NULL)
3026 		return;
3027 	SCTP_BUF_LEN(m_notify) = 0;
3028 	spc = mtod(m_notify, struct sctp_paddr_change *);
3029 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
3030 	spc->spc_flags = 0;
3031 	spc->spc_length = sizeof(struct sctp_paddr_change);
3032 	switch (sa->sa_family) {
3033 	case AF_INET:
3034 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
3035 		break;
3036 #ifdef INET6
3037 	case AF_INET6:
3038 		{
3039 			struct sockaddr_in6 *sin6;
3040 
3041 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
3042 
3043 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
3044 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
3045 				if (sin6->sin6_scope_id == 0) {
3046 					/* recover scope_id for user */
3047 					(void)sa6_recoverscope(sin6);
3048 				} else {
3049 					/* clear embedded scope_id for user */
3050 					in6_clearscope(&sin6->sin6_addr);
3051 				}
3052 			}
3053 			break;
3054 		}
3055 #endif
3056 	default:
3057 		/* TSNH */
3058 		break;
3059 	}
3060 	spc->spc_state = state;
3061 	spc->spc_error = error;
3062 	spc->spc_assoc_id = sctp_get_associd(stcb);
3063 
3064 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
3065 	SCTP_BUF_NEXT(m_notify) = NULL;
3066 
3067 	/* append to socket */
3068 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3069 	    0, 0, 0, 0, 0, 0,
3070 	    m_notify);
3071 	if (control == NULL) {
3072 		/* no memory */
3073 		sctp_m_freem(m_notify);
3074 		return;
3075 	}
3076 	control->length = SCTP_BUF_LEN(m_notify);
3077 	control->spec_flags = M_NOTIFICATION;
3078 	/* not that we need this */
3079 	control->tail_mbuf = m_notify;
3080 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3081 	    control,
3082 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3083 }
3084 
3085 
/*
 * Build and queue an SCTP_SEND_FAILED notification for a chunk taken
 * off the send/sent queue.  The chunk's user data is stolen (chk->data
 * is cleared) and chained behind the notification header, so freeing
 * the notification mbuf also frees the data.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
		/* event not enabled */
		return;

	/* length reported to the user: header plus the original data */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* chain the user's data behind the notification header */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no room; freeing m_notify frees the stolen data chain too */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3154 
3155 
/*
 * Build and queue an SCTP_SEND_FAILED notification for a message still
 * pending on a stream output queue (never chunked).  The pending data
 * is stolen (sp->data is cleared) and chained behind the notification
 * header, so freeing the notification mbuf also frees the data.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
		/* event not enabled */
		return;

	/* length reported to the user: header plus the queued data */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	ssf->ssf_info.sinfo_flags = sp->sinfo_flags;
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* chain the user's data behind the notification header */
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no room; freeing m_notify frees the stolen data chain too */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3224 
3225 
3226 
3227 static void
3228 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3229     uint32_t error)
3230 {
3231 	struct mbuf *m_notify;
3232 	struct sctp_adaptation_event *sai;
3233 	struct sctp_queued_to_read *control;
3234 
3235 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)))
3236 		/* event not enabled */
3237 		return;
3238 
3239 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3240 	if (m_notify == NULL)
3241 		/* no space left */
3242 		return;
3243 	SCTP_BUF_LEN(m_notify) = 0;
3244 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3245 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3246 	sai->sai_flags = 0;
3247 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3248 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3249 	sai->sai_assoc_id = sctp_get_associd(stcb);
3250 
3251 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3252 	SCTP_BUF_NEXT(m_notify) = NULL;
3253 
3254 	/* append to socket */
3255 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3256 	    0, 0, 0, 0, 0, 0,
3257 	    m_notify);
3258 	if (control == NULL) {
3259 		/* no memory */
3260 		sctp_m_freem(m_notify);
3261 		return;
3262 	}
3263 	control->length = SCTP_BUF_LEN(m_notify);
3264 	control->spec_flags = M_NOTIFICATION;
3265 	/* not that we need this */
3266 	control->tail_mbuf = m_notify;
3267 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3268 	    control,
3269 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3270 }
3271 
3272 /* This always must be called with the read-queue LOCKED in the INP */
3273 void
3274 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3275     int nolock, uint32_t val)
3276 {
3277 	struct mbuf *m_notify;
3278 	struct sctp_pdapi_event *pdapi;
3279 	struct sctp_queued_to_read *control;
3280 	struct sockbuf *sb;
3281 
3282 	if ((stcb == NULL) || (stcb->sctp_socket == NULL) ||
3283 	    sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
3284 		/* event not enabled */
3285 		return;
3286 
3287 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3288 	if (m_notify == NULL)
3289 		/* no space left */
3290 		return;
3291 	SCTP_BUF_LEN(m_notify) = 0;
3292 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3293 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3294 	pdapi->pdapi_flags = 0;
3295 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3296 	pdapi->pdapi_indication = error;
3297 	pdapi->pdapi_stream = (val >> 16);
3298 	pdapi->pdapi_seq = (val & 0x0000ffff);
3299 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3300 
3301 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3302 	SCTP_BUF_NEXT(m_notify) = NULL;
3303 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3304 	    0, 0, 0, 0, 0, 0,
3305 	    m_notify);
3306 	if (control == NULL) {
3307 		/* no memory */
3308 		sctp_m_freem(m_notify);
3309 		return;
3310 	}
3311 	control->spec_flags = M_NOTIFICATION;
3312 	control->length = SCTP_BUF_LEN(m_notify);
3313 	/* not that we need this */
3314 	control->tail_mbuf = m_notify;
3315 	control->held_length = 0;
3316 	control->length = 0;
3317 	if (nolock == 0) {
3318 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
3319 	}
3320 	sb = &stcb->sctp_socket->so_rcv;
3321 	if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
3322 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3323 	}
3324 	sctp_sballoc(stcb, sb, m_notify);
3325 	if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
3326 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3327 	}
3328 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3329 	control->end_added = 1;
3330 	if (stcb->asoc.control_pdapi)
3331 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3332 	else {
3333 		/* we really should not see this case */
3334 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3335 	}
3336 	if (nolock == 0) {
3337 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
3338 	}
3339 	if (stcb->sctp_ep && stcb->sctp_socket) {
3340 		/* This should always be the case */
3341 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3342 	}
3343 }
3344 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT to the ULP.  For TCP-model (and UDP
 * connected) sockets the socket is additionally marked unable to
 * send/receive so blocked callers wake up.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock-order dance: drop the TCB lock while acquiring the
		 * socket lock, holding a refcnt so the TCB cannot be freed
		 * under us; then re-take the TCB lock.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket was closed while we were unlocked; bail */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
		socantrcvmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3416 
3417 static void
3418 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3419     int number_entries, uint16_t * list, int flag)
3420 {
3421 	struct mbuf *m_notify;
3422 	struct sctp_queued_to_read *control;
3423 	struct sctp_stream_reset_event *strreset;
3424 	int len;
3425 
3426 	if (stcb == NULL) {
3427 		return;
3428 	}
3429 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
3430 		/* event not enabled */
3431 		return;
3432 
3433 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3434 	if (m_notify == NULL)
3435 		/* no space left */
3436 		return;
3437 	SCTP_BUF_LEN(m_notify) = 0;
3438 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3439 	if (len > M_TRAILINGSPACE(m_notify)) {
3440 		/* never enough room */
3441 		sctp_m_freem(m_notify);
3442 		return;
3443 	}
3444 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3445 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3446 	if (number_entries == 0) {
3447 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3448 	} else {
3449 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3450 	}
3451 	strreset->strreset_length = len;
3452 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3453 	if (number_entries) {
3454 		int i;
3455 
3456 		for (i = 0; i < number_entries; i++) {
3457 			strreset->strreset_list[i] = ntohs(list[i]);
3458 		}
3459 	}
3460 	SCTP_BUF_LEN(m_notify) = len;
3461 	SCTP_BUF_NEXT(m_notify) = NULL;
3462 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3463 		/* no space */
3464 		sctp_m_freem(m_notify);
3465 		return;
3466 	}
3467 	/* append to socket */
3468 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3469 	    0, 0, 0, 0, 0, 0,
3470 	    m_notify);
3471 	if (control == NULL) {
3472 		/* no memory */
3473 		sctp_m_freem(m_notify);
3474 		return;
3475 	}
3476 	control->spec_flags = M_NOTIFICATION;
3477 	control->length = SCTP_BUF_LEN(m_notify);
3478 	/* not that we need this */
3479 	control->tail_mbuf = m_notify;
3480 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3481 	    control,
3482 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3483 }
3484 
3485 
3486 void
3487 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3488     uint32_t error, void *data, int so_locked
3489 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3490     SCTP_UNUSED
3491 #endif
3492 )
3493 {
3494 	if (stcb == NULL) {
3495 		/* unlikely but */
3496 		return;
3497 	}
3498 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3499 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3500 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
3501 	    ) {
3502 		/* No notifications up when we are in a no socket state */
3503 		return;
3504 	}
3505 	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3506 		/* Can't send up to a closed socket any notifications */
3507 		return;
3508 	}
3509 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3510 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3511 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3512 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3513 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3514 			/* Don't report these in front states */
3515 			return;
3516 		}
3517 	}
3518 	switch (notification) {
3519 	case SCTP_NOTIFY_ASSOC_UP:
3520 		if (stcb->asoc.assoc_up_sent == 0) {
3521 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3522 			stcb->asoc.assoc_up_sent = 1;
3523 		}
3524 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3525 			sctp_notify_adaptation_layer(stcb, error);
3526 		}
3527 		break;
3528 	case SCTP_NOTIFY_ASSOC_DOWN:
3529 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3530 		break;
3531 	case SCTP_NOTIFY_INTERFACE_DOWN:
3532 		{
3533 			struct sctp_nets *net;
3534 
3535 			net = (struct sctp_nets *)data;
3536 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3537 			    (struct sockaddr *)&net->ro._l_addr, error);
3538 			break;
3539 		}
3540 	case SCTP_NOTIFY_INTERFACE_UP:
3541 		{
3542 			struct sctp_nets *net;
3543 
3544 			net = (struct sctp_nets *)data;
3545 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3546 			    (struct sockaddr *)&net->ro._l_addr, error);
3547 			break;
3548 		}
3549 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3550 		{
3551 			struct sctp_nets *net;
3552 
3553 			net = (struct sctp_nets *)data;
3554 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3555 			    (struct sockaddr *)&net->ro._l_addr, error);
3556 			break;
3557 		}
3558 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3559 		sctp_notify_send_failed2(stcb, error,
3560 		    (struct sctp_stream_queue_pending *)data, so_locked);
3561 		break;
3562 	case SCTP_NOTIFY_DG_FAIL:
3563 		sctp_notify_send_failed(stcb, error,
3564 		    (struct sctp_tmit_chunk *)data, so_locked);
3565 		break;
3566 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3567 		{
3568 			uint32_t val;
3569 
3570 			val = *((uint32_t *) data);
3571 
3572 			sctp_notify_partial_delivery_indication(stcb, error, 0, val);
3573 		}
3574 		break;
3575 	case SCTP_NOTIFY_STRDATA_ERR:
3576 		break;
3577 	case SCTP_NOTIFY_ASSOC_ABORTED:
3578 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3579 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3580 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3581 		} else {
3582 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3583 		}
3584 		break;
3585 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3586 		break;
3587 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3588 		break;
3589 	case SCTP_NOTIFY_ASSOC_RESTART:
3590 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3591 		break;
3592 	case SCTP_NOTIFY_HB_RESP:
3593 		break;
3594 	case SCTP_NOTIFY_STR_RESET_SEND:
3595 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3596 		break;
3597 	case SCTP_NOTIFY_STR_RESET_RECV:
3598 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3599 		break;
3600 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3601 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3602 		break;
3603 
3604 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3605 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3606 		break;
3607 
3608 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3609 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3610 		    error);
3611 		break;
3612 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3613 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3614 		    error);
3615 		break;
3616 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3617 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3618 		    error);
3619 		break;
3620 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3621 		break;
3622 	case SCTP_NOTIFY_ASCONF_FAILED:
3623 		break;
3624 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3625 		sctp_notify_shutdown_event(stcb);
3626 		break;
3627 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3628 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3629 		    (uint16_t) (uintptr_t) data);
3630 		break;
3631 #if 0
3632 	case SCTP_NOTIFY_AUTH_KEY_CONFLICT:
3633 		sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT,
3634 		    error, (uint16_t) (uintptr_t) data);
3635 		break;
3636 #endif				/* not yet? remove? */
3637 
3638 
3639 	default:
3640 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3641 		    __FUNCTION__, notification, notification);
3642 		break;
3643 	}			/* end switch */
3644 }
3645 
3646 void
3647 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3648 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3649     SCTP_UNUSED
3650 #endif
3651 )
3652 {
3653 	struct sctp_association *asoc;
3654 	struct sctp_stream_out *outs;
3655 	struct sctp_tmit_chunk *chk;
3656 	struct sctp_stream_queue_pending *sp;
3657 	int i;
3658 
3659 	asoc = &stcb->asoc;
3660 
3661 	if (stcb == NULL) {
3662 		return;
3663 	}
3664 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3665 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3666 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3667 		return;
3668 	}
3669 	/* now through all the gunk freeing chunks */
3670 	if (holds_lock == 0) {
3671 		SCTP_TCB_SEND_LOCK(stcb);
3672 	}
3673 	/* sent queue SHOULD be empty */
3674 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3675 		chk = TAILQ_FIRST(&asoc->sent_queue);
3676 		while (chk) {
3677 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3678 			asoc->sent_queue_cnt--;
3679 			if (chk->data) {
3680 				/*
3681 				 * trim off the sctp chunk header(it should
3682 				 * be there)
3683 				 */
3684 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3685 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3686 					sctp_mbuf_crush(chk->data);
3687 					chk->send_size -= sizeof(struct sctp_data_chunk);
3688 				}
3689 			}
3690 			sctp_free_bufspace(stcb, asoc, chk, 1);
3691 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3692 			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3693 			if (chk->data) {
3694 				sctp_m_freem(chk->data);
3695 				chk->data = NULL;
3696 			}
3697 			sctp_free_a_chunk(stcb, chk);
3698 			/* sa_ignore FREED_MEMORY */
3699 			chk = TAILQ_FIRST(&asoc->sent_queue);
3700 		}
3701 	}
3702 	/* pending send queue SHOULD be empty */
3703 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3704 		chk = TAILQ_FIRST(&asoc->send_queue);
3705 		while (chk) {
3706 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3707 			asoc->send_queue_cnt--;
3708 			if (chk->data) {
3709 				/*
3710 				 * trim off the sctp chunk header(it should
3711 				 * be there)
3712 				 */
3713 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3714 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3715 					sctp_mbuf_crush(chk->data);
3716 					chk->send_size -= sizeof(struct sctp_data_chunk);
3717 				}
3718 			}
3719 			sctp_free_bufspace(stcb, asoc, chk, 1);
3720 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3721 			if (chk->data) {
3722 				sctp_m_freem(chk->data);
3723 				chk->data = NULL;
3724 			}
3725 			sctp_free_a_chunk(stcb, chk);
3726 			/* sa_ignore FREED_MEMORY */
3727 			chk = TAILQ_FIRST(&asoc->send_queue);
3728 		}
3729 	}
3730 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3731 		/* For each stream */
3732 		outs = &stcb->asoc.strmout[i];
3733 		/* clean up any sends there */
3734 		stcb->asoc.locked_on_sending = NULL;
3735 		sp = TAILQ_FIRST(&outs->outqueue);
3736 		while (sp) {
3737 			stcb->asoc.stream_queue_cnt--;
3738 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3739 			sctp_free_spbufspace(stcb, asoc, sp);
3740 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3741 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3742 			if (sp->data) {
3743 				sctp_m_freem(sp->data);
3744 				sp->data = NULL;
3745 			}
3746 			if (sp->net)
3747 				sctp_free_remote_addr(sp->net);
3748 			sp->net = NULL;
3749 			/* Free the chunk */
3750 			sctp_free_a_strmoq(stcb, sp);
3751 			/* sa_ignore FREED_MEMORY */
3752 			sp = TAILQ_FIRST(&outs->outqueue);
3753 		}
3754 	}
3755 
3756 	if (holds_lock == 0) {
3757 		SCTP_TCB_SEND_UNLOCK(stcb);
3758 	}
3759 }
3760 
3761 void
3762 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3763 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3764     SCTP_UNUSED
3765 #endif
3766 )
3767 {
3768 
3769 	if (stcb == NULL) {
3770 		return;
3771 	}
3772 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3773 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3774 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3775 		return;
3776 	}
3777 	/* Tell them we lost the asoc */
3778 	sctp_report_all_outbound(stcb, 1, so_locked);
3779 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3780 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3781 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3782 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3783 	}
3784 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3785 }
3786 
/*
 * Abort an association in response to an inbound packet: notify the
 * ULP (when a TCB exists), send an ABORT to the peer using the assoc's
 * peer vtag (or 0 when no TCB), then free the TCB - or finish freeing
 * a socket-gone inp that has no associations left.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Acquire the socket lock before freeing the assoc; the
		 * TCB lock is dropped during acquisition while a refcnt
		 * keeps the TCB alive.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last assoc gone; complete the inp free */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3832 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Dump the association's circular inbound/outbound TSN logs to the
 * console for debugging.
 * NOTE(review): the inner guard is spelled NOSIY_PRINTS - likely a typo
 * for NOISY_PRINTS - so this body is normally compiled out; confirm the
 * intended macro name before changing it.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* wrapped log: print the older entries (tsn_in_at..end) first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* wrapped log: print the older entries (tsn_out_at..end) first */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3896 
/*
 * Abort an established association from the local side: notify the
 * ULP (unless the socket is gone), send an ABORT chunk (with optional
 * cause in op_err) to the peer, update stats, and free the TCB.
 * so_locked indicates whether the caller already holds the socket lock
 * (relevant only on platforms that take it here).
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last assoc gone; complete the inp free */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* vtag is read but the ABORT helper derives it from the tcb */
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Take the socket lock if the caller does not hold it; drop the
	 * TCB lock meanwhile, pinned by a refcnt so the TCB survives.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3962 
3963 void
3964 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3965     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id)
3966 {
3967 	struct sctp_chunkhdr *ch, chunk_buf;
3968 	unsigned int chk_length;
3969 
3970 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3971 	/* Generate a TO address for future reference */
3972 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3973 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3974 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3975 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3976 		}
3977 	}
3978 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3979 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3980 	while (ch != NULL) {
3981 		chk_length = ntohs(ch->chunk_length);
3982 		if (chk_length < sizeof(*ch)) {
3983 			/* break to abort land */
3984 			break;
3985 		}
3986 		switch (ch->chunk_type) {
3987 		case SCTP_COOKIE_ECHO:
3988 			/* We hit here only if the assoc is being freed */
3989 			return;
3990 		case SCTP_PACKET_DROPPED:
3991 			/* we don't respond to pkt-dropped */
3992 			return;
3993 		case SCTP_ABORT_ASSOCIATION:
3994 			/* we don't respond with an ABORT to an ABORT */
3995 			return;
3996 		case SCTP_SHUTDOWN_COMPLETE:
3997 			/*
3998 			 * we ignore it since we are not waiting for it and
3999 			 * peer is gone
4000 			 */
4001 			return;
4002 		case SCTP_SHUTDOWN_ACK:
4003 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
4004 			return;
4005 		default:
4006 			break;
4007 		}
4008 		offset += SCTP_SIZE32(chk_length);
4009 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4010 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4011 	}
4012 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id);
4013 }
4014 
4015 /*
4016  * check the inbound datagram to make sure there is not an abort inside it,
4017  * if there is return 1, else return 0.
4018  */
4019 int
4020 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4021 {
4022 	struct sctp_chunkhdr *ch;
4023 	struct sctp_init_chunk *init_chk, chunk_buf;
4024 	int offset;
4025 	unsigned int chk_length;
4026 
4027 	offset = iphlen + sizeof(struct sctphdr);
4028 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4029 	    (uint8_t *) & chunk_buf);
4030 	while (ch != NULL) {
4031 		chk_length = ntohs(ch->chunk_length);
4032 		if (chk_length < sizeof(*ch)) {
4033 			/* packet is probably corrupt */
4034 			break;
4035 		}
4036 		/* we seem to be ok, is it an abort? */
4037 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4038 			/* yep, tell them */
4039 			return (1);
4040 		}
4041 		if (ch->chunk_type == SCTP_INITIATION) {
4042 			/* need to update the Vtag */
4043 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4044 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4045 			if (init_chk != NULL) {
4046 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4047 			}
4048 		}
4049 		/* Nope, move to the next chunk */
4050 		offset += SCTP_SIZE32(chk_length);
4051 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4052 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4053 	}
4054 	return (0);
4055 }
4056 
4057 /*
4058  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4059  * set (i.e. it's 0) so, create this function to compare link local scopes
4060  */
4061 #ifdef INET6
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	/*
	 * Compare the link-local scopes of two IPv6 addresses.  Local
	 * copies are taken so sa6_recoverscope() can fill in a missing
	 * (zero) sin6_scope_id without touching the callers' structures.
	 * Returns 1 when the scopes match, 0 otherwise.
	 */
	struct sockaddr_in6 lhs = *addr1;
	struct sockaddr_in6 rhs = *addr2;

	if ((lhs.sin6_scope_id == 0) && sa6_recoverscope(&lhs)) {
		/* no scope recoverable, no match possible */
		return (0);
	}
	if ((rhs.sin6_scope_id == 0) && sa6_recoverscope(&rhs)) {
		/* no scope recoverable, no match possible */
		return (0);
	}
	return ((lhs.sin6_scope_id == rhs.sin6_scope_id) ? 1 : 0);
}
4086 
/*
 * Return a sockaddr_in6 whose link-local scope has been recovered into
 * sin6_scope_id (using "store" as scratch space) or, if a scope id is
 * already present, with the embedded scope stripped from the address.
 * Non-IPv6 and non-link-local addresses are returned untouched.
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	if (addr->sin6_family != AF_INET6) {
		return (addr);
	}
	if (!IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
		return (addr);
	}
	if (addr->sin6_scope_id == 0) {
		*store = *addr;
		if (sa6_recoverscope(store) == 0) {
			/* recovery worked - hand back the copy */
			return (store);
		}
		/* could not recover a scope; return the original */
		return (addr);
	}
	/* scope id already set - drop the embedded form */
	in6_clearscope(&addr->sin6_addr);
	return (addr);
}
4110 
4111 #endif
4112 
4113 /*
4114  * are the two addresses the same?  currently a "scopeless" check returns: 1
4115  * if same, 0 if not
4116  */
4117 int
4118 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4119 {
4120 
4121 	/* must be valid */
4122 	if (sa1 == NULL || sa2 == NULL)
4123 		return (0);
4124 
4125 	/* must be the same family */
4126 	if (sa1->sa_family != sa2->sa_family)
4127 		return (0);
4128 
4129 	switch (sa1->sa_family) {
4130 #ifdef INET6
4131 	case AF_INET6:
4132 		{
4133 			/* IPv6 addresses */
4134 			struct sockaddr_in6 *sin6_1, *sin6_2;
4135 
4136 			sin6_1 = (struct sockaddr_in6 *)sa1;
4137 			sin6_2 = (struct sockaddr_in6 *)sa2;
4138 			return (SCTP6_ARE_ADDR_EQUAL(&sin6_1->sin6_addr,
4139 			    &sin6_2->sin6_addr));
4140 		}
4141 #endif
4142 	case AF_INET:
4143 		{
4144 			/* IPv4 addresses */
4145 			struct sockaddr_in *sin_1, *sin_2;
4146 
4147 			sin_1 = (struct sockaddr_in *)sa1;
4148 			sin_2 = (struct sockaddr_in *)sa2;
4149 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4150 		}
4151 	default:
4152 		/* we don't do these... */
4153 		return (0);
4154 	}
4155 }
4156 
/*
 * Print a human-readable rendering of the given IPv4 or IPv6 address
 * (with port, and scope for v6) via SCTP_PRINTF().
 */
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];

	ip6buf[0] = 0;
#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)sa;

			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &a6->sin6_addr),
			    ntohs(a6->sin6_port),
			    a6->sin6_scope_id);
			break;
		}
#endif
	case AF_INET:
		{
			struct sockaddr_in *a4 = (struct sockaddr_in *)sa;
			unsigned char *b = (unsigned char *)&a4->sin_addr;

			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    b[0], b[1], b[2], b[3], ntohs(a4->sin_port));
			break;
		}
	default:
		/* unknown address family */
		SCTP_PRINTF("?\n");
		break;
	}
}
4196 
4197 void
4198 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4199 {
4200 	switch (iph->ip_v) {
4201 		case IPVERSION:
4202 		{
4203 			struct sockaddr_in lsa, fsa;
4204 
4205 			bzero(&lsa, sizeof(lsa));
4206 			lsa.sin_len = sizeof(lsa);
4207 			lsa.sin_family = AF_INET;
4208 			lsa.sin_addr = iph->ip_src;
4209 			lsa.sin_port = sh->src_port;
4210 			bzero(&fsa, sizeof(fsa));
4211 			fsa.sin_len = sizeof(fsa);
4212 			fsa.sin_family = AF_INET;
4213 			fsa.sin_addr = iph->ip_dst;
4214 			fsa.sin_port = sh->dest_port;
4215 			SCTP_PRINTF("src: ");
4216 			sctp_print_address((struct sockaddr *)&lsa);
4217 			SCTP_PRINTF("dest: ");
4218 			sctp_print_address((struct sockaddr *)&fsa);
4219 			break;
4220 		}
4221 #ifdef INET6
4222 	case IPV6_VERSION >> 4:
4223 		{
4224 			struct ip6_hdr *ip6;
4225 			struct sockaddr_in6 lsa6, fsa6;
4226 
4227 			ip6 = (struct ip6_hdr *)iph;
4228 			bzero(&lsa6, sizeof(lsa6));
4229 			lsa6.sin6_len = sizeof(lsa6);
4230 			lsa6.sin6_family = AF_INET6;
4231 			lsa6.sin6_addr = ip6->ip6_src;
4232 			lsa6.sin6_port = sh->src_port;
4233 			bzero(&fsa6, sizeof(fsa6));
4234 			fsa6.sin6_len = sizeof(fsa6);
4235 			fsa6.sin6_family = AF_INET6;
4236 			fsa6.sin6_addr = ip6->ip6_dst;
4237 			fsa6.sin6_port = sh->dest_port;
4238 			SCTP_PRINTF("src: ");
4239 			sctp_print_address((struct sockaddr *)&lsa6);
4240 			SCTP_PRINTF("dest: ");
4241 			sctp_print_address((struct sockaddr *)&fsa6);
4242 			break;
4243 		}
4244 #endif
4245 	default:
4246 		/* TSNH */
4247 		break;
4248 	}
4249 }
4250 
/*
 * Migrate all read-queue control structures belonging to stcb from
 * old_inp's socket to new_inp's (used by peeloff/accept).  Two phases:
 * first, under the old INP's read lock, the matching controls are
 * unhooked onto a private list and their mbufs uncharged from the old
 * receive buffer; then, under the new INP's read lock, they are
 * appended to the new read queue and charged to the new buffer.
 * waitflags is passed through to sblock() on the old receive buffer.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for out target stcb */
	while (control) {
		/* grab the successor first: control may be unlinked below */
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* uncharge every mbuf from the old receive buffer */
			while (m) {
				if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge every mbuf to the new receive buffer */
		while (m) {
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4332 
4333 
4334 void
4335 sctp_add_to_readq(struct sctp_inpcb *inp,
4336     struct sctp_tcb *stcb,
4337     struct sctp_queued_to_read *control,
4338     struct sockbuf *sb,
4339     int end,
4340     int so_locked
4341 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4342     SCTP_UNUSED
4343 #endif
4344 )
4345 {
4346 	/*
4347 	 * Here we must place the control on the end of the socket read
4348 	 * queue AND increment sb_cc so that select will work properly on
4349 	 * read.
4350 	 */
4351 	struct mbuf *m, *prev = NULL;
4352 
4353 	if (inp == NULL) {
4354 		/* Gak, TSNH!! */
4355 #ifdef INVARIANTS
4356 		panic("Gak, inp NULL on add_to_readq");
4357 #endif
4358 		return;
4359 	}
4360 	SCTP_INP_READ_LOCK(inp);
4361 	if (!(control->spec_flags & M_NOTIFICATION)) {
4362 		atomic_add_int(&inp->total_recvs, 1);
4363 		if (!control->do_not_ref_stcb) {
4364 			atomic_add_int(&stcb->total_recvs, 1);
4365 		}
4366 	}
4367 	m = control->data;
4368 	control->held_length = 0;
4369 	control->length = 0;
4370 	while (m) {
4371 		if (SCTP_BUF_LEN(m) == 0) {
4372 			/* Skip mbufs with NO length */
4373 			if (prev == NULL) {
4374 				/* First one */
4375 				control->data = sctp_m_free(m);
4376 				m = control->data;
4377 			} else {
4378 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4379 				m = SCTP_BUF_NEXT(prev);
4380 			}
4381 			if (m == NULL) {
4382 				control->tail_mbuf = prev;;
4383 			}
4384 			continue;
4385 		}
4386 		prev = m;
4387 		if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
4388 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4389 		}
4390 		sctp_sballoc(stcb, sb, m);
4391 		if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
4392 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4393 		}
4394 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4395 		m = SCTP_BUF_NEXT(m);
4396 	}
4397 	if (prev != NULL) {
4398 		control->tail_mbuf = prev;
4399 	} else {
4400 		/* Everything got collapsed out?? */
4401 		return;
4402 	}
4403 	if (end) {
4404 		control->end_added = 1;
4405 	}
4406 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4407 	SCTP_INP_READ_UNLOCK(inp);
4408 	if (inp && inp->sctp_socket) {
4409 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4410 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4411 		} else {
4412 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4413 			struct socket *so;
4414 
4415 			so = SCTP_INP_SO(inp);
4416 			if (!so_locked) {
4417 				atomic_add_int(&stcb->asoc.refcnt, 1);
4418 				SCTP_TCB_UNLOCK(stcb);
4419 				SCTP_SOCKET_LOCK(so, 1);
4420 				SCTP_TCB_LOCK(stcb);
4421 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4422 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4423 					SCTP_SOCKET_UNLOCK(so, 1);
4424 					return;
4425 				}
4426 			}
4427 #endif
4428 			sctp_sorwakeup(inp, inp->sctp_socket);
4429 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4430 			if (!so_locked) {
4431 				SCTP_SOCKET_UNLOCK(so, 1);
4432 			}
4433 #endif
4434 		}
4435 	}
4436 }
4437 
4438 
/*
 * Append an mbuf chain to an existing read control structure (used for
 * partial-delivery API events and reassembly-queue appends).  The
 * chain length is added to control->length and, when sb is non-NULL,
 * charged to the socket receive buffer.  "end" marks the message
 * complete; ctls_cumack records the highest PD-API TSN on this chain.
 * Returns 0 on success, -1 on failure (no control, already complete,
 * or empty chain).
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common failure exit: drop the read lock if we took it */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* strip zero-length mbufs; count and (optionally) charge the rest */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			/* this control no longer drives the PD-API */
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			/*
			 * NOTE(review): this path dereferences stcb, but
			 * stcb may be NULL here (see do_not_ref_stcb above)
			 * - confirm callers on these builds.
			 */
			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4580 
4581 
4582 
4583 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4584  *************ALTERNATE ROUTING CODE
4585  */
4586 
4587 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4588  *************ALTERNATE ROUTING CODE
4589  */
4590 
4591 struct mbuf *
4592 sctp_generate_invmanparam(int err)
4593 {
4594 	/* Return a MBUF with a invalid mandatory parameter */
4595 	struct mbuf *m;
4596 
4597 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4598 	if (m) {
4599 		struct sctp_paramhdr *ph;
4600 
4601 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4602 		ph = mtod(m, struct sctp_paramhdr *);
4603 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4604 		ph->param_type = htons(err);
4605 	}
4606 	return (m);
4607 }
4608 
4609 #ifdef SCTP_MBCNT_LOGGING
4610 void
4611 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4612     struct sctp_tmit_chunk *tp1, int chk_cnt)
4613 {
4614 	if (tp1->data == NULL) {
4615 		return;
4616 	}
4617 	asoc->chunks_on_out_queue -= chk_cnt;
4618 	if (sctp_logging_level & SCTP_MBCNT_LOGGING_ENABLE) {
4619 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4620 		    asoc->total_output_queue_size,
4621 		    tp1->book_size,
4622 		    0,
4623 		    tp1->mbcnt);
4624 	}
4625 	if (asoc->total_output_queue_size >= tp1->book_size) {
4626 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4627 	} else {
4628 		asoc->total_output_queue_size = 0;
4629 	}
4630 
4631 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4632 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4633 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4634 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4635 		} else {
4636 			stcb->sctp_socket->so_snd.sb_cc = 0;
4637 
4638 		}
4639 	}
4640 }
4641 
4642 #endif
4643 
4644 int
4645 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4646     int reason, struct sctpchunk_listhead *queue, int so_locked
4647 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4648     SCTP_UNUSED
4649 #endif
4650 )
4651 {
4652 	int ret_sz = 0;
4653 	int notdone;
4654 	uint8_t foundeom = 0;
4655 
4656 	do {
4657 		ret_sz += tp1->book_size;
4658 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4659 		if (tp1->data) {
4660 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4661 			struct socket *so;
4662 
4663 #endif
4664 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4665 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, SCTP_SO_NOT_LOCKED);
4666 			sctp_m_freem(tp1->data);
4667 			tp1->data = NULL;
4668 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4669 			so = SCTP_INP_SO(stcb->sctp_ep);
4670 			if (!so_locked) {
4671 				atomic_add_int(&stcb->asoc.refcnt, 1);
4672 				SCTP_TCB_UNLOCK(stcb);
4673 				SCTP_SOCKET_LOCK(so, 1);
4674 				SCTP_TCB_LOCK(stcb);
4675 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4676 				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4677 					/*
4678 					 * assoc was freed while we were
4679 					 * unlocked
4680 					 */
4681 					SCTP_SOCKET_UNLOCK(so, 1);
4682 					return (ret_sz);
4683 				}
4684 			}
4685 #endif
4686 			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4687 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4688 			if (!so_locked) {
4689 				SCTP_SOCKET_UNLOCK(so, 1);
4690 			}
4691 #endif
4692 		}
4693 		if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4694 			stcb->asoc.sent_queue_cnt_removeable--;
4695 		}
4696 		if (queue == &stcb->asoc.send_queue) {
4697 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4698 			/* on to the sent queue */
4699 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4700 			    sctp_next);
4701 			stcb->asoc.sent_queue_cnt++;
4702 		}
4703 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4704 		    SCTP_DATA_NOT_FRAG) {
4705 			/* not frag'ed we ae done   */
4706 			notdone = 0;
4707 			foundeom = 1;
4708 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4709 			/* end of frag, we are done */
4710 			notdone = 0;
4711 			foundeom = 1;
4712 		} else {
4713 			/*
4714 			 * Its a begin or middle piece, we must mark all of
4715 			 * it
4716 			 */
4717 			notdone = 1;
4718 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4719 		}
4720 	} while (tp1 && notdone);
4721 	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
4722 		/*
4723 		 * The multi-part message was scattered across the send and
4724 		 * sent queue.
4725 		 */
4726 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4727 		/*
4728 		 * recurse throught the send_queue too, starting at the
4729 		 * beginning.
4730 		 */
4731 		if (tp1) {
4732 			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
4733 			    &stcb->asoc.send_queue, so_locked);
4734 		} else {
4735 			SCTP_PRINTF("hmm, nothing on the send queue and no EOM?\n");
4736 		}
4737 	}
4738 	return (ret_sz);
4739 }
4740 
4741 /*
4742  * checks to see if the given address, sa, is one that is currently known by
4743  * the kernel note: can't distinguish the same address on multiple interfaces
4744  * and doesn't handle multiple addresses with different zone/scope id's note:
4745  * ifa_ifwithaddr() compares the entire sockaddr struct
4746  */
4747 struct sctp_ifa *
4748 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4749     int holds_lock)
4750 {
4751 	struct sctp_laddr *laddr;
4752 
4753 	if (holds_lock == 0) {
4754 		SCTP_INP_RLOCK(inp);
4755 	}
4756 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4757 		if (laddr->ifa == NULL)
4758 			continue;
4759 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4760 			continue;
4761 		if (addr->sa_family == AF_INET) {
4762 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4763 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4764 				/* found him. */
4765 				if (holds_lock == 0) {
4766 					SCTP_INP_RUNLOCK(inp);
4767 				}
4768 				return (laddr->ifa);
4769 				break;
4770 			}
4771 		}
4772 #ifdef INET6
4773 		if (addr->sa_family == AF_INET6) {
4774 			if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr,
4775 			    &laddr->ifa->address.sin6.sin6_addr)) {
4776 				/* found him. */
4777 				if (holds_lock == 0) {
4778 					SCTP_INP_RUNLOCK(inp);
4779 				}
4780 				return (laddr->ifa);
4781 				break;
4782 			}
4783 		}
4784 #endif
4785 	}
4786 	if (holds_lock == 0) {
4787 		SCTP_INP_RUNLOCK(inp);
4788 	}
4789 	return (NULL);
4790 }
4791 
/*
 * Reduce an address to a 32-bit hash for the VRF address-hash table.
 * IPv4: the address XOR'ed with itself shifted down 16 bits.  IPv6:
 * the sum of the four 32-bit words, folded the same way.  Any other
 * family hashes to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		{
			struct sockaddr_in *in4 = (struct sockaddr_in *)addr;
			uint32_t v = in4->sin_addr.s_addr;

			/* fold the high 16 bits into the low 16 bits */
			return (v ^ (v >> 16));
		}
	case AF_INET6:
		{
			struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)addr;
			uint32_t acc;

			acc = (in6->sin6_addr.s6_addr32[0] +
			    in6->sin6_addr.s6_addr32[1] +
			    in6->sin6_addr.s6_addr32[2] +
			    in6->sin6_addr.s6_addr32[3]);
			return (acc ^ (acc >> 16));
		}
	default:
		return (0);
	}
}
4814 
4815 struct sctp_ifa *
4816 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4817 {
4818 	struct sctp_ifa *sctp_ifap;
4819 	struct sctp_vrf *vrf;
4820 	struct sctp_ifalist *hash_head;
4821 	uint32_t hash_of_addr;
4822 
4823 	if (holds_lock == 0)
4824 		SCTP_IPI_ADDR_RLOCK();
4825 
4826 	vrf = sctp_find_vrf(vrf_id);
4827 	if (vrf == NULL) {
4828 		if (holds_lock == 0)
4829 			SCTP_IPI_ADDR_RUNLOCK();
4830 		return (NULL);
4831 	}
4832 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4833 
4834 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4835 	if (hash_head == NULL) {
4836 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4837 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4838 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4839 		sctp_print_address(addr);
4840 		SCTP_PRINTF("No such bucket for address\n");
4841 		if (holds_lock == 0)
4842 			SCTP_IPI_ADDR_RUNLOCK();
4843 
4844 		return (NULL);
4845 	}
4846 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4847 		if (sctp_ifap == NULL) {
4848 			panic("Huh LIST_FOREACH corrupt");
4849 		}
4850 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4851 			continue;
4852 		if (addr->sa_family == AF_INET) {
4853 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4854 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4855 				/* found him. */
4856 				if (holds_lock == 0)
4857 					SCTP_IPI_ADDR_RUNLOCK();
4858 				return (sctp_ifap);
4859 				break;
4860 			}
4861 		}
4862 #ifdef INET6
4863 		if (addr->sa_family == AF_INET6) {
4864 			if (SCTP6_ARE_ADDR_EQUAL(&((struct sockaddr_in6 *)addr)->sin6_addr,
4865 			    &sctp_ifap->address.sin6.sin6_addr)) {
4866 				/* found him. */
4867 				if (holds_lock == 0)
4868 					SCTP_IPI_ADDR_RUNLOCK();
4869 				return (sctp_ifap);
4870 				break;
4871 			}
4872 		}
4873 #endif
4874 	}
4875 	if (holds_lock == 0)
4876 		SCTP_IPI_ADDR_RUNLOCK();
4877 	return (NULL);
4878 }
4879 
/*
 * Called after the user has pulled data off the socket: decide whether
 * the receive window has opened enough (by at least rwnd_req) to make
 * an immediate window-update SACK worthwhile, and if so send one and
 * kick chunk output.  *freed_so_far is folded into the association's
 * running count and reset.  hold_rlock says the caller holds the INP
 * read lock, which must be dropped around the SACK work and re-taken.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* hold a reference so the assoc cannot be freed under us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window opened by at least rwnd_req - send an update SACK */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb);
		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* restore the read lock if we dropped it above */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
4960 
/*
 * Core receive routine for SCTP sockets.
 *
 * Pulls queued messages off the endpoint's read_queue and delivers them to
 * the caller either by copying into 'uio' (when mp == NULL) or by handing
 * back the raw mbuf chain through '*mp'.  Handles blocking/non-blocking
 * modes, MSG_PEEK, partial-delivery (pd-api) waits, optional return of the
 * peer's address through 'from'/'fromlen', and fill-in of 'sinfo' when
 * 'filling_sinfo' is set.  As receive-buffer space is freed, it may trigger
 * a window-update SACK via sctp_user_rcvd().
 *
 * Returns 0 or an errno; MSG_EOR / MSG_NOTIFICATION / MSG_TRUNC are
 * reported back through '*msg_flags'.
 */
int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
	 * On the way out we may send out any combination of:
	 * MSG_NOTIFICATION MSG_EOR
	 *
	 */
	struct sctp_inpcb *inp = NULL;
	int my_len = 0;
	int cp_len = 0, error = 0;
	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
	struct mbuf *m = NULL, *embuf = NULL;
	struct sctp_tcb *stcb = NULL;
	int wakeup_read_socket = 0;
	int freecnt_applied = 0;	/* non-zero while we hold a ref on stcb->asoc.refcnt */
	int out_flags = 0, in_flags = 0;
	int block_allowed = 1;	/* cleared for non-blocking I/O */
	uint32_t freed_so_far = 0;
	uint32_t copied_so_far = 0;
	int in_eeor_mode = 0;
	int no_rcv_needed = 0;
	uint32_t rwnd_req = 0;	/* threshold of freed bytes before a window update */
	int hold_sblock = 0;	/* non-zero while SOCKBUF_LOCK(&so->so_rcv) is held */
	int hold_rlock = 0;	/* non-zero while SCTP_INP_READ_LOCK(inp) is held */
	int slen = 0;
	uint32_t held_length = 0;
	int sockbuf_lock = 0;	/* non-zero while the sblock() reader lock is held */

	if (uio == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if (msg_flags) {
		in_flags = *msg_flags;
		if (in_flags & MSG_PEEK)
			SCTP_STAT_INCR(sctps_read_peeks);
	} else {
		in_flags = 0;
	}
	slen = uio->uio_resid;

	/* Pull in and set up our int flags */
	if (in_flags & MSG_OOB) {
		/* Out of band's NOT supported */
		return (EOPNOTSUPP);
	}
	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((in_flags & (MSG_DONTWAIT
	    | MSG_NBIO
	    )) ||
	    SCTP_SO_IS_NBIO(so)) {
		block_allowed = 0;
	}
	/* setup the endpoint */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
		return (EFAULT);
	}
	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
	/* Must be at least a MTU's worth */
	if (rwnd_req < SCTP_MIN_RWND)
		rwnd_req = SCTP_MIN_RWND;
	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
	if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTER,
		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
	}
	if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTERPL,
		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
	}
	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
	sockbuf_lock = 1;
	if (error) {
		goto release_unlocked;
	}
restart:


restart_nosblocks:
	if (hold_sblock == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		hold_sblock = 1;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto out;
		} else {
			if (so->so_rcv.sb_cc == 0) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
				/* indicate EOF */
				error = 0;
				goto out;
			}
		}
	}
	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
		/* we need to wait for data */
		if ((so->so_rcv.sb_cc == 0) &&
		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/*
				 * For active open side clear flags for
				 * re-use passive open is blocked by
				 * connect.
				 */
				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
					/*
					 * You were aborted, passive side
					 * always hits here
					 */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
					error = ECONNRESET;
					/*
					 * You get this once if you are
					 * active open side
					 */
					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
						/*
						 * Remove flag if on the
						 * active open side
						 */
						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
					}
				}
				so->so_state &= ~(SS_ISCONNECTING |
				    SS_ISDISCONNECTING |
				    SS_ISCONFIRMING |
				    SS_ISCONNECTED);
				if (error == 0) {
					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
						error = ENOTCONN;
					} else {
						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
					}
				}
				goto out;
			}
		}
		error = sbwait(&so->so_rcv);
		if (error) {
			goto out;
		}
		held_length = 0;
		goto restart_nosblocks;
	} else if (so->so_rcv.sb_cc == 0) {
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
		} else {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
					/*
					 * For active open side clear flags
					 * for re-use passive open is
					 * blocked by connect.
					 */
					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
						/*
						 * You were aborted, passive
						 * side always hits here
						 */
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
						error = ECONNRESET;
						/*
						 * You get this once if you
						 * are active open side
						 */
						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
							/*
							 * Remove flag if on
							 * the active open
							 * side
							 */
							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
						}
					}
					so->so_state &= ~(SS_ISCONNECTING |
					    SS_ISDISCONNECTING |
					    SS_ISCONFIRMING |
					    SS_ISCONNECTED);
					if (error == 0) {
						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
							error = ENOTCONN;
						} else {
							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
						}
					}
					goto out;
				}
			}
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
			error = EWOULDBLOCK;
		}
		goto out;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* we possibly have data we can read */
	/* sa_ignore FREED_MEMORY */
	control = TAILQ_FIRST(&inp->read_queue);
	if (control == NULL) {
		/*
		 * This could be happening since the appender did the
		 * increment but has not yet done the tailq insert onto the
		 * read_queue
		 */
		if (hold_rlock == 0) {
			SCTP_INP_READ_LOCK(inp);
			hold_rlock = 1;
		}
		control = TAILQ_FIRST(&inp->read_queue);
		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
#ifdef INVARIANTS
			panic("Huh, its non zero and nothing on control?");
#endif
			so->so_rcv.sb_cc = 0;
		}
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->do_not_ref_stcb)) {
		/*
		 * Clean up code for freeing assoc that left behind a
		 * pdapi.. maybe a peer in EEOR that just closed after
		 * sending and never indicated a EOR.
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		control->held_length = 0;
		if (control->data) {
			/* Hmm there is data here .. fix */
			struct mbuf *m_tmp;
			int cnt = 0;

			m_tmp = control->data;
			while (m_tmp) {
				cnt += SCTP_BUF_LEN(m_tmp);
				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
					control->tail_mbuf = m_tmp;
					control->end_added = 1;
				}
				m_tmp = SCTP_BUF_NEXT(m_tmp);
			}
			control->length = cnt;
		} else {
			/* remove it */
			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if (control->length == 0) {
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
		    (filling_sinfo)) {
			/* find a more suitable one then this */
			ctl = TAILQ_NEXT(control, next);
			while (ctl) {
				if ((ctl->stcb != control->stcb) && (ctl->length) &&
				    (ctl->some_taken ||
				    (ctl->spec_flags & M_NOTIFICATION) ||
				    ((ctl->do_not_ref_stcb == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present. If we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery as started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered..
					 */
					control = ctl;
					goto found_one;
				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
					    (ctl->length) &&
					    ((ctl->some_taken) ||
					    ((ctl->do_not_ref_stcb == 0) &&
					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to that tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notification on the same assoc to
					 * be delivered.
					 */
					control = ctl;
					goto found_one;
				}
				ctl = TAILQ_NEXT(ctl, next);
			}
		}
		/*
		 * if we reach here, no suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into the our held count, and its time to sleep again.
		 */
		held_length = so->so_rcv.sb_cc;
		control->held_length = so->so_rcv.sb_cc;
		goto restart;
	}
	/* Clear the held length since there is something to read */
	control->held_length = 0;
	if (hold_rlock) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
found_one:
	/*
	 * If we reach here, control has a some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
	control->some_taken++;
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	stcb = control->stcb;
	if (stcb) {
		if ((control->do_not_ref_stcb == 0) &&
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			if (freecnt_applied == 0)
				stcb = NULL;
		} else if (control->do_not_ref_stcb == 0) {
			/* you can't free it on me please */
			/*
			 * The lock on the socket buffer protects us so the
			 * free code will stop. But since we used the
			 * socketbuf lock and the sender uses the tcb_lock
			 * to increment, we need to use the atomic add to
			 * the refcnt
			 */
			if (freecnt_applied) {
#ifdef INVARIANTS
				panic("refcnt already incremented");
#else
				printf("refcnt already incremented?\n");
#endif
			} else {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				freecnt_applied = 1;
			}
			/*
			 * Setup to remember how much we have not yet told
			 * the peer our rwnd has opened up. Note we grab the
			 * value from the tcb from last time. Note too that
			 * sack sending clears this when a sack is sent,
			 * which is fine. Once we hit the rwnd_req, we then
			 * will go to the sctp_user_rcvd() that will not
			 * lock until it KNOWs it MUST send a WUP-SACK.
			 */
			freed_so_far = stcb->freed_by_sorcv_sincelast;
			stcb->freed_by_sorcv_sincelast = 0;
		}
	}
	if (stcb &&
	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
	    control->do_not_ref_stcb == 0) {
		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
	}
	/* First lets get off the sinfo and sockaddr info */
	if ((sinfo) && filling_sinfo) {
		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
		nxt = TAILQ_NEXT(control, next);
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			struct sctp_extrcvinfo *s_extra;

			s_extra = (struct sctp_extrcvinfo *)sinfo;
			if ((nxt) &&
			    (nxt->length)) {
				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
				if (nxt->sinfo_flags & SCTP_UNORDERED) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
				}
				if (nxt->spec_flags & M_NOTIFICATION) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
				}
				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
				s_extra->sreinfo_next_length = nxt->length;
				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
				if (nxt->tail_mbuf != NULL) {
					if (nxt->end_added) {
						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
					}
				}
			} else {
				/*
				 * we explicitly 0 this, since the memcpy
				 * got some other things beyond the older
				 * sinfo_ that is on the control's structure
				 * :-D
				 */
				nxt = NULL;
				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
				s_extra->sreinfo_next_aid = 0;
				s_extra->sreinfo_next_length = 0;
				s_extra->sreinfo_next_ppid = 0;
				s_extra->sreinfo_next_stream = 0;
			}
		}
		/*
		 * update off the real current cum-ack, if we have an stcb.
		 */
		if ((control->do_not_ref_stcb == 0) && stcb)
			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
		/*
		 * mask off the high bits, we keep the actual chunk bits in
		 * there.
		 */
		sinfo->sinfo_flags &= 0x00ff;
		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
			sinfo->sinfo_flags |= SCTP_UNORDERED;
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	{
		int index, newindex;
		struct sctp_pcbtsn_rlog *entry;

		do {
			index = inp->readlog_index;
			newindex = index + 1;
			if (newindex >= SCTP_READ_LOG_SIZE) {
				newindex = 0;
			}
		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
		entry = &inp->readlog[index];
		entry->vtag = control->sinfo_assoc_id;
		entry->strm = control->sinfo_stream;
		entry->seq = control->sinfo_ssn;
		entry->sz = control->length;
		entry->flgs = control->sinfo_flags;
	}
#endif
	/*
	 * Copy the sender's address out to the caller, converting to a
	 * v4-mapped v6 address when the socket requires it.
	 */
	if (fromlen && from) {
		struct sockaddr *to;

#ifdef INET
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in *)from)->sin_port = control->port_from;
#else
		/* No AF_INET use AF_INET6 */
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
#endif

		to = from;
#if defined(INET) && defined(INET6)
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
		    (to->sa_family == AF_INET) &&
		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
			struct sockaddr_in *sin;
			struct sockaddr_in6 sin6;

			sin = (struct sockaddr_in *)to;
			bzero(&sin6, sizeof(sin6));
			sin6.sin6_family = AF_INET6;
			sin6.sin6_len = sizeof(struct sockaddr_in6);
			sin6.sin6_addr.s6_addr32[2] = ntohl(0x0000ffff);
			bcopy(&sin->sin_addr,
			    &sin6.sin6_addr.s6_addr32[3],
			    sizeof(sin6.sin6_addr.s6_addr32[3]));
			sin6.sin6_port = sin->sin_port;
			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
		}
#endif
#if defined(INET6)
		{
			struct sockaddr_in6 lsa6, *to6;

			to6 = (struct sockaddr_in6 *)to;
			sctp_recover_scope_mac(to6, (&lsa6));
		}
#endif
	}
	/* now copy out what data we can */
	if (mp == NULL) {
		/* copy out each mbuf in the chain up to length */
get_more_data:
		/* uio copy path: move data from the control's mbuf chain to userland */
		m = control->data;
		while (m) {
			/* Move out all we can */
			cp_len = (int)uio->uio_resid;
			my_len = (int)SCTP_BUF_LEN(m);
			if (cp_len > my_len) {
				/* not enough in this buf */
				cp_len = my_len;
			}
			if (hold_rlock) {
				SCTP_INP_READ_UNLOCK(inp);
				hold_rlock = 0;
			}
			if (cp_len > 0)
				error = uiomove(mtod(m, char *), cp_len, uio);
			/* re-read */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				goto release;
			}
			if ((control->do_not_ref_stcb == 0) && stcb &&
			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				no_rcv_needed = 1;
			}
			if (error) {
				/* error we are out of here */
				goto release;
			}
			if ((SCTP_BUF_NEXT(m) == NULL) &&
			    (cp_len >= SCTP_BUF_LEN(m)) &&
			    ((control->end_added == 0) ||
			    (control->end_added &&
			    (TAILQ_NEXT(control, next) == NULL)))
			    ) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
			if (cp_len == SCTP_BUF_LEN(m)) {
				if ((SCTP_BUF_NEXT(m) == NULL) &&
				    (control->end_added)) {
					out_flags |= MSG_EOR;
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				/* we ate up the mbuf */
				if (in_flags & MSG_PEEK) {
					/* just looking */
					m = SCTP_BUF_NEXT(m);
					copied_so_far += cp_len;
				} else {
					/* dispose of the mbuf */
					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
					}
					sctp_sbfree(control, stcb, &so->so_rcv, m);
					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
					}
					embuf = m;
					copied_so_far += cp_len;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					atomic_subtract_int(&control->length, cp_len);
					control->data = sctp_m_free(m);
					m = control->data;
					/*
					 * been through it all, must hold sb
					 * lock ok to null tail
					 */
					if (control->data == NULL) {
#ifdef INVARIANTS
						if ((control->end_added == 0) ||
						    (TAILQ_NEXT(control, next) == NULL)) {
							/*
							 * If the end is not
							 * added, OR the
							 * next is NOT null
							 * we MUST have the
							 * lock.
							 */
							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
								panic("Hmm we don't own the lock?");
							}
						}
#endif
						control->tail_mbuf = NULL;
#ifdef INVARIANTS
						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
							panic("end_added, nothing left and no MSG_EOR");
						}
#endif
					}
				}
			} else {
				/* Do we need to trim the mbuf? */
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				if ((in_flags & MSG_PEEK) == 0) {
					SCTP_BUF_RESV_UF(m, cp_len);
					SCTP_BUF_LEN(m) -= cp_len;
					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
					}
					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
					if ((control->do_not_ref_stcb == 0) &&
					    stcb) {
						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
					}
					copied_so_far += cp_len;
					embuf = m;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
						    SCTP_LOG_SBRESULT, 0);
					}
					atomic_subtract_int(&control->length, cp_len);
				} else {
					copied_so_far += cp_len;
				}
			}
			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
				break;
			}
			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
			    (control->do_not_ref_stcb == 0) &&
			    (freed_so_far >= rwnd_req)) {
				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
			}
		}		/* end while(m) */
		/*
		 * At this point we have looked at it all and we either have
		 * a MSG_EOR/or read all the user wants... <OR>
		 * control->length == 0.
		 */
		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
			/* we are done with this control */
			if (control->length == 0) {
				if (control->data) {
#ifdef INVARIANTS
					panic("control->data not null at read eor?");
#else
					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
					sctp_m_freem(control->data);
					control->data = NULL;
#endif
				}
		done_with_control:
				if (TAILQ_NEXT(control, next) == NULL) {
					/*
					 * If we don't have a next we need a
					 * lock, if there is a next
					 * interrupt is filling ahead of us
					 * and we don't need a lock to
					 * remove this guy (which is the
					 * head of the queue).
					 */
					if (hold_rlock == 0) {
						SCTP_INP_READ_LOCK(inp);
						hold_rlock = 1;
					}
				}
				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hidden data */
				if (control->held_length) {
					held_length = 0;
					control->held_length = 0;
					wakeup_read_socket = 1;
				}
				if (control->aux_data) {
					sctp_m_free(control->aux_data);
					control->aux_data = NULL;
				}
				no_rcv_needed = control->do_not_ref_stcb;
				sctp_free_remote_addr(control->whoFrom);
				control->data = NULL;
				sctp_free_a_readq(stcb, control);
				control = NULL;
				if ((freed_so_far >= rwnd_req) &&
				    (no_rcv_needed == 0))
					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);

			} else {
				/*
				 * The user did not read all of this
				 * message, turn off the returned MSG_EOR
				 * since we are leaving more behind on the
				 * control to read.
				 */
#ifdef INVARIANTS
				if (control->end_added &&
				    (control->data == NULL) &&
				    (control->tail_mbuf == NULL)) {
					panic("Gak, control->length is corrupt?");
				}
#endif
				no_rcv_needed = control->do_not_ref_stcb;
				out_flags &= ~MSG_EOR;
			}
		}
		if (out_flags & MSG_EOR) {
			goto release;
		}
		if ((uio->uio_resid == 0) ||
		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
		    ) {
			goto release;
		}
		/*
		 * If I hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions. Can we block? if not
		 * we are done. Did the user NOT set MSG_WAITALL?
		 */
		if (block_allowed == 0) {
			goto release;
		}
		/*
		 * We need to wait for more data a few things: - We don't
		 * sbunlock() so we don't get someone else reading. - We
		 * must be sure to account for the case where what is added
		 * is NOT to our control when we wakeup.
		 */

		/*
		 * Do we need to tell the transport a rwnd update might be
		 * needed before we go to sleep?
		 */
		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
		    ((freed_so_far >= rwnd_req) &&
		    (control->do_not_ref_stcb == 0) &&
		    (no_rcv_needed == 0))) {
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
		}
wait_some_more:
		/* Partial delivery: block until more of this message arrives. */
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			goto release;
		}
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
			goto release;

		if (hold_rlock == 1) {
			SCTP_INP_READ_UNLOCK(inp);
			hold_rlock = 0;
		}
		if (hold_sblock == 0) {
			SOCKBUF_LOCK(&so->so_rcv);
			hold_sblock = 1;
		}
		if ((copied_so_far) && (control->length == 0) &&
		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
		    ) {
			goto release;
		}
		if (so->so_rcv.sb_cc <= control->held_length) {
			error = sbwait(&so->so_rcv);
			if (error) {
				goto release;
			}
			control->held_length = 0;
		}
		if (hold_sblock) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			hold_sblock = 0;
		}
		if (control->length == 0) {
			/* still nothing here */
			if (control->end_added == 1) {
				/* he aborted, or is done i.e. did a shutdown */
				out_flags |= MSG_EOR;
				if (control->pdapi_aborted) {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;

					out_flags |= MSG_TRUNC;
				} else {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				goto done_with_control;
			}
			if (so->so_rcv.sb_cc > held_length) {
				control->held_length = so->so_rcv.sb_cc;
				held_length = 0;
			}
			goto wait_some_more;
		} else if (control->data == NULL) {
			/*
			 * we must re-sync since data is probably being
			 * added
			 */
			SCTP_INP_READ_LOCK(inp);
			if ((control->length > 0) && (control->data == NULL)) {
				/*
				 * big trouble.. we have the lock and its
				 * corrupt?
				 */
#ifdef INVARIANTS
				panic("Impossible data==NULL length !=0");
#endif
				out_flags |= MSG_EOR;
				out_flags |= MSG_TRUNC;
				control->length = 0;
				SCTP_INP_READ_UNLOCK(inp);
				goto done_with_control;
			}
			SCTP_INP_READ_UNLOCK(inp);
			/* We will fall around to get more data */
		}
		goto get_more_data;
	} else {
		/*-
		 * Give caller back the mbuf chain,
		 * store in uio_resid the length
		 */
		wakeup_read_socket = 0;
		if ((control->end_added == 0) ||
		    (TAILQ_NEXT(control, next) == NULL)) {
			/* Need to get rlock */
			if (hold_rlock == 0) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
		}
		if (control->end_added) {
			out_flags |= MSG_EOR;
			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
		}
		if (control->spec_flags & M_NOTIFICATION) {
			out_flags |= MSG_NOTIFICATION;
		}
		uio->uio_resid = control->length;
		*mp = control->data;
		m = control->data;
		while (m) {
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
			}
			sctp_sbfree(control, stcb, &so->so_rcv, m);
			freed_so_far += SCTP_BUF_LEN(m);
			freed_so_far += MSIZE;
			if (sctp_logging_level & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control->data = control->tail_mbuf = NULL;
		control->length = 0;
		if (out_flags & MSG_EOR) {
			/* Done with this control */
			goto done_with_control;
		}
	}
release:
	/* Normal exit path: drop any lock still held, then the reader lock. */
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	sbunlock(&so->so_rcv);
	sockbuf_lock = 0;

release_unlocked:
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
		if ((freed_so_far >= rwnd_req) &&
		    (control && (control->do_not_ref_stcb == 0)) &&
		    (no_rcv_needed == 0))
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
	}
	if (msg_flags)
		*msg_flags = out_flags;
out:
	/* Final exit: release remaining locks/refcounts and log the result. */
	if (((out_flags & MSG_EOR) == 0) &&
	    ((in_flags & MSG_PEEK) == 0) &&
	    (sinfo) &&
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
		struct sctp_extrcvinfo *s_extra;

		s_extra = (struct sctp_extrcvinfo *)sinfo;
		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
	}
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if (sockbuf_lock) {
		sbunlock(&so->so_rcv);
	}
	if (freecnt_applied) {
		/*
		 * The lock on the socket buffer protects us so the free
		 * code will stop. But since we used the socketbuf lock and
		 * the sender uses the tcb_lock to increment, we need to use
		 * the atomic add to the refcnt.
		 */
		if (stcb == NULL) {
			panic("stcb for refcnt has gone NULL?");
		}
		atomic_add_int(&stcb->asoc.refcnt, -1);
		freecnt_applied = 0;
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
	}
	if (sctp_logging_level & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}
5925 
5926 
5927 #ifdef SCTP_MBUF_LOGGING
5928 struct mbuf *
5929 sctp_m_free(struct mbuf *m)
5930 {
5931 	if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
5932 		if (SCTP_BUF_IS_EXTENDED(m)) {
5933 			sctp_log_mb(m, SCTP_MBUF_IFREE);
5934 		}
5935 	}
5936 	return (m_free(m));
5937 }
5938 
5939 void
5940 sctp_m_freem(struct mbuf *mb)
5941 {
5942 	while (mb != NULL)
5943 		mb = sctp_m_free(mb);
5944 }
5945 
5946 #endif
5947 
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 *
	 * Returns 0 on success, EADDRNOTAVAIL if the address is not a
	 * local address on the given VRF, or ENOMEM if no work item
	 * could be allocated.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;	/* work item handed to the iterator */

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and init the wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* the queued work item holds its own reference on the ifa */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_IPI_ITERATOR_WQ_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&sctppcbinfo.addr_wq, wi, sctp_nxt_addr);
	/* kick the address work-queue timer so the item gets processed */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
	return (0);
}
5994 
5995 
5996 
5997 
5998 int
5999 sctp_soreceive(struct socket *so,
6000     struct sockaddr **psa,
6001     struct uio *uio,
6002     struct mbuf **mp0,
6003     struct mbuf **controlp,
6004     int *flagsp)
6005 {
6006 	int error, fromlen;
6007 	uint8_t sockbuf[256];
6008 	struct sockaddr *from;
6009 	struct sctp_extrcvinfo sinfo;
6010 	int filling_sinfo = 1;
6011 	struct sctp_inpcb *inp;
6012 
6013 	inp = (struct sctp_inpcb *)so->so_pcb;
6014 	/* pickup the assoc we are reading from */
6015 	if (inp == NULL) {
6016 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6017 		return (EINVAL);
6018 	}
6019 	if ((sctp_is_feature_off(inp,
6020 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6021 	    (controlp == NULL)) {
6022 		/* user does not want the sndrcv ctl */
6023 		filling_sinfo = 0;
6024 	}
6025 	if (psa) {
6026 		from = (struct sockaddr *)sockbuf;
6027 		fromlen = sizeof(sockbuf);
6028 		from->sa_len = 0;
6029 	} else {
6030 		from = NULL;
6031 		fromlen = 0;
6032 	}
6033 
6034 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6035 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6036 	if ((controlp) && (filling_sinfo)) {
6037 		/* copy back the sinfo in a CMSG format */
6038 		if (filling_sinfo)
6039 			*controlp = sctp_build_ctl_nchunk(inp,
6040 			    (struct sctp_sndrcvinfo *)&sinfo);
6041 		else
6042 			*controlp = NULL;
6043 	}
6044 	if (psa) {
6045 		/* copy back the address info */
6046 		if (from && from->sa_len) {
6047 			*psa = sodupsockaddr(from, M_NOWAIT);
6048 		} else {
6049 			*psa = NULL;
6050 		}
6051 	}
6052 	return (error);
6053 }
6054 
6055 
6056 int
6057 sctp_l_soreceive(struct socket *so,
6058     struct sockaddr **name,
6059     struct uio *uio,
6060     char **controlp,
6061     int *controllen,
6062     int *flag)
6063 {
6064 	int error, fromlen;
6065 	uint8_t sockbuf[256];
6066 	struct sockaddr *from;
6067 	struct sctp_extrcvinfo sinfo;
6068 	int filling_sinfo = 1;
6069 	struct sctp_inpcb *inp;
6070 
6071 	inp = (struct sctp_inpcb *)so->so_pcb;
6072 	/* pickup the assoc we are reading from */
6073 	if (inp == NULL) {
6074 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6075 		return (EINVAL);
6076 	}
6077 	if ((sctp_is_feature_off(inp,
6078 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6079 	    (controlp == NULL)) {
6080 		/* user does not want the sndrcv ctl */
6081 		filling_sinfo = 0;
6082 	}
6083 	if (name) {
6084 		from = (struct sockaddr *)sockbuf;
6085 		fromlen = sizeof(sockbuf);
6086 		from->sa_len = 0;
6087 	} else {
6088 		from = NULL;
6089 		fromlen = 0;
6090 	}
6091 
6092 	error = sctp_sorecvmsg(so, uio,
6093 	    (struct mbuf **)NULL,
6094 	    from, fromlen, flag,
6095 	    (struct sctp_sndrcvinfo *)&sinfo,
6096 	    filling_sinfo);
6097 	if ((controlp) && (filling_sinfo)) {
6098 		/*
6099 		 * copy back the sinfo in a CMSG format note that the caller
6100 		 * has reponsibility for freeing the memory.
6101 		 */
6102 		if (filling_sinfo)
6103 			*controlp = sctp_build_ctl_cchunk(inp,
6104 			    controllen,
6105 			    (struct sctp_sndrcvinfo *)&sinfo);
6106 	}
6107 	if (name) {
6108 		/* copy back the address info */
6109 		if (from && from->sa_len) {
6110 			*name = sodupsockaddr(from, M_WAIT);
6111 		} else {
6112 			*name = NULL;
6113 		}
6114 	}
6115 	return (error);
6116 }
6117 
6118 
6119 
6120 
6121 
6122 
6123 
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add the totaddr addresses packed back-to-back in 'addr'
	 * (sockaddr_in / sockaddr_in6 records) to the association as
	 * confirmed remote addresses.  Returns the number of addresses
	 * added; on allocation failure the association is freed, *error
	 * is set to ENOBUFS, and the count so far is returned.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;	/* cursor into the packed address list */
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/*
		 * NOTE(review): for an unrecognized address family incr
		 * keeps its previous value (0 on the first entry), so the
		 * cursor may not advance correctly.  Presumably callers
		 * pre-validate the list (sctp_connectx_helper_find
		 * truncates totaddr at the first unknown family) --
		 * confirm.
		 */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6164 
6165 struct sctp_tcb *
6166 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6167     int *totaddr, int *num_v4, int *num_v6, int *error,
6168     int limit, int *bad_addr)
6169 {
6170 	struct sockaddr *sa;
6171 	struct sctp_tcb *stcb = NULL;
6172 	size_t incr, at, i;
6173 
6174 	at = incr = 0;
6175 	sa = addr;
6176 	*error = *num_v6 = *num_v4 = 0;
6177 	/* account and validate addresses */
6178 	for (i = 0; i < (size_t)*totaddr; i++) {
6179 		if (sa->sa_family == AF_INET) {
6180 			(*num_v4) += 1;
6181 			incr = sizeof(struct sockaddr_in);
6182 			if (sa->sa_len != incr) {
6183 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6184 				*error = EINVAL;
6185 				*bad_addr = 1;
6186 				return (NULL);
6187 			}
6188 		} else if (sa->sa_family == AF_INET6) {
6189 			struct sockaddr_in6 *sin6;
6190 
6191 			sin6 = (struct sockaddr_in6 *)sa;
6192 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6193 				/* Must be non-mapped for connectx */
6194 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6195 				*error = EINVAL;
6196 				*bad_addr = 1;
6197 				return (NULL);
6198 			}
6199 			(*num_v6) += 1;
6200 			incr = sizeof(struct sockaddr_in6);
6201 			if (sa->sa_len != incr) {
6202 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6203 				*error = EINVAL;
6204 				*bad_addr = 1;
6205 				return (NULL);
6206 			}
6207 		} else {
6208 			*totaddr = i;
6209 			/* we are done */
6210 			break;
6211 		}
6212 		SCTP_INP_INCR_REF(inp);
6213 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6214 		if (stcb != NULL) {
6215 			/* Already have or am bring up an association */
6216 			return (stcb);
6217 		} else {
6218 			SCTP_INP_DECR_REF(inp);
6219 		}
6220 		if ((at + incr) > (size_t)limit) {
6221 			*totaddr = i;
6222 			break;
6223 		}
6224 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6225 	}
6226 	return ((struct sctp_tcb *)NULL);
6227 }
6228 
6229 /*
6230  * sctp_bindx(ADD) for one address.
6231  * assumes all arguments are valid/checked by caller.
6232  */
/*
 * sctp_bindx(ADD) for one address: validate the sockaddr against the
 * endpoint's family/flags, then either perform the initial bind (if the
 * endpoint is still unbound) or add the address to the endpoint's bound
 * address list.  Errors are reported through *error (EINVAL on bad
 * input, EADDRINUSE if another endpoint owns the address).
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;	/* address actually used for binding */

#ifdef INET6
	struct sockaddr_in sin;		/* v4 form of a v4-mapped v6 address */

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6)
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* unmap to a plain v4 sockaddr and bind that */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* first address on an unbound endpoint: do a normal bind */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 *
			 * NOTE(review): the reference taken by
			 * sctp_pcb_findep is on 'lep', yet 'inp' is
			 * decremented here.  Correct when lep == inp, but
			 * looks wrong when lep is a different endpoint --
			 * verify against sctp_pcb_findep's contract.
			 */
			SCTP_INP_DECR_REF(inp);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* address is free: add it to this endpoint */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* another endpoint already owns this address */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6354 
6355 /*
6356  * sctp_bindx(DELETE) for one address.
6357  * assumes all arguments are valid/checked by caller.
6358  */
6359 void
6360 sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
6361     struct sockaddr *sa, sctp_assoc_t assoc_id,
6362     uint32_t vrf_id, int *error)
6363 {
6364 	struct sockaddr *addr_touse;
6365 
6366 #ifdef INET6
6367 	struct sockaddr_in sin;
6368 
6369 #endif
6370 
6371 	/* see if we're bound all already! */
6372 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6373 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6374 		*error = EINVAL;
6375 		return;
6376 	}
6377 	addr_touse = sa;
6378 #if defined(INET6)
6379 	if (sa->sa_family == AF_INET6) {
6380 		struct sockaddr_in6 *sin6;
6381 
6382 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6383 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6384 			*error = EINVAL;
6385 			return;
6386 		}
6387 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6388 			/* can only bind v6 on PF_INET6 sockets */
6389 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6390 			*error = EINVAL;
6391 			return;
6392 		}
6393 		sin6 = (struct sockaddr_in6 *)addr_touse;
6394 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6395 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6396 			    SCTP_IPV6_V6ONLY(inp)) {
6397 				/* can't bind mapped-v4 on PF_INET sockets */
6398 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6399 				*error = EINVAL;
6400 				return;
6401 			}
6402 			in6_sin6_2_sin(&sin, sin6);
6403 			addr_touse = (struct sockaddr *)&sin;
6404 		}
6405 	}
6406 #endif
6407 	if (sa->sa_family == AF_INET) {
6408 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6409 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6410 			*error = EINVAL;
6411 			return;
6412 		}
6413 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6414 		    SCTP_IPV6_V6ONLY(inp)) {
6415 			/* can't bind v4 on PF_INET sockets */
6416 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6417 			*error = EINVAL;
6418 			return;
6419 		}
6420 	}
6421 	/*
6422 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6423 	 * below is ever changed we may need to lock before calling
6424 	 * association level binding.
6425 	 */
6426 	if (assoc_id == 0) {
6427 		/* delete the address */
6428 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6429 		    SCTP_DEL_IP_ADDRESS,
6430 		    vrf_id, NULL);
6431 	} else {
6432 		/*
6433 		 * FIX: decide whether we allow assoc based bindx
6434 		 */
6435 	}
6436 }
6437 
6438 /*
6439  * returns the valid local address count for an assoc, taking into account
6440  * all scoping rules
6441  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the local addresses usable by this association, honoring
	 * the association's negotiated scoping rules (loopback, private
	 * v4, link-local and site-local v6) and the endpoint's address
	 * family restrictions.  Returns the count (0 if the VRF is gone).
	 */
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		/* a v6 socket that is not v6-only may also use v4 */
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad
									 * link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6573 
6574 #if defined(SCTP_LOCAL_TRACE_BUF)
6575 
6576 void
6577 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6578 {
6579 	uint32_t saveindex, newindex;
6580 
6581 	do {
6582 		saveindex = sctp_log.index;
6583 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6584 			newindex = 1;
6585 		} else {
6586 			newindex = saveindex + 1;
6587 		}
6588 	} while (atomic_cmpset_int(&sctp_log.index, saveindex, newindex) == 0);
6589 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6590 		saveindex = 0;
6591 	}
6592 	sctp_log.entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6593 	sctp_log.entry[saveindex].subsys = subsys;
6594 	sctp_log.entry[saveindex].params[0] = a;
6595 	sctp_log.entry[saveindex].params[1] = b;
6596 	sctp_log.entry[saveindex].params[2] = c;
6597 	sctp_log.entry[saveindex].params[3] = d;
6598 	sctp_log.entry[saveindex].params[4] = e;
6599 	sctp_log.entry[saveindex].params[5] = f;
6600 }
6601 
6602 #endif
6603