xref: /freebsd/sys/netinet/sctputil.c (revision 721351876cd4d3a8a700f62d2061331fa951a488)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #include <netinet6/sctp6_var.h>
43 #endif
44 #include <netinet/sctp_header.h>
45 #include <netinet/sctp_output.h>
46 #include <netinet/sctp_uio.h>
47 #include <netinet/sctp_timer.h>
48 #include <netinet/sctp_crc32.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_cc_functions.h>
53 
54 #define NUMBER_OF_MTU_SIZES 18
55 
56 
57 #ifndef KTR_SCTP
58 #define KTR_SCTP KTR_SUBSYS
59 #endif
60 
61 void
62 sctp_sblog(struct sockbuf *sb,
63     struct sctp_tcb *stcb, int from, int incr)
64 {
65 	struct sctp_cwnd_log sctp_clog;
66 
67 	sctp_clog.x.sb.stcb = stcb;
68 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
69 	if (stcb)
70 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
71 	else
72 		sctp_clog.x.sb.stcb_sbcc = 0;
73 	sctp_clog.x.sb.incr = incr;
74 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
75 	    SCTP_LOG_EVENT_SB,
76 	    from,
77 	    sctp_clog.x.misc.log1,
78 	    sctp_clog.x.misc.log2,
79 	    sctp_clog.x.misc.log3,
80 	    sctp_clog.x.misc.log4);
81 }
82 
83 void
84 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
85 {
86 	struct sctp_cwnd_log sctp_clog;
87 
88 	sctp_clog.x.close.inp = (void *)inp;
89 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
90 	if (stcb) {
91 		sctp_clog.x.close.stcb = (void *)stcb;
92 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
93 	} else {
94 		sctp_clog.x.close.stcb = 0;
95 		sctp_clog.x.close.state = 0;
96 	}
97 	sctp_clog.x.close.loc = loc;
98 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
99 	    SCTP_LOG_EVENT_CLOSE,
100 	    0,
101 	    sctp_clog.x.misc.log1,
102 	    sctp_clog.x.misc.log2,
103 	    sctp_clog.x.misc.log3,
104 	    sctp_clog.x.misc.log4);
105 }
106 
107 
108 void
109 rto_logging(struct sctp_nets *net, int from)
110 {
111 	struct sctp_cwnd_log sctp_clog;
112 
113 	memset(&sctp_clog, 0, sizeof(sctp_clog));
114 	sctp_clog.x.rto.net = (void *)net;
115 	sctp_clog.x.rto.rtt = net->prev_rtt;
116 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
117 	    SCTP_LOG_EVENT_RTT,
118 	    from,
119 	    sctp_clog.x.misc.log1,
120 	    sctp_clog.x.misc.log2,
121 	    sctp_clog.x.misc.log3,
122 	    sctp_clog.x.misc.log4);
123 
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 
145 }
146 
147 void
148 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
149 {
150 	struct sctp_cwnd_log sctp_clog;
151 
152 	sctp_clog.x.nagle.stcb = (void *)stcb;
153 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
154 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
155 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
156 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
157 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
158 	    SCTP_LOG_EVENT_NAGLE,
159 	    action,
160 	    sctp_clog.x.misc.log1,
161 	    sctp_clog.x.misc.log2,
162 	    sctp_clog.x.misc.log3,
163 	    sctp_clog.x.misc.log4);
164 }
165 
166 
167 void
168 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
169 {
170 	struct sctp_cwnd_log sctp_clog;
171 
172 	sctp_clog.x.sack.cumack = cumack;
173 	sctp_clog.x.sack.oldcumack = old_cumack;
174 	sctp_clog.x.sack.tsn = tsn;
175 	sctp_clog.x.sack.numGaps = gaps;
176 	sctp_clog.x.sack.numDups = dups;
177 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
178 	    SCTP_LOG_EVENT_SACK,
179 	    from,
180 	    sctp_clog.x.misc.log1,
181 	    sctp_clog.x.misc.log2,
182 	    sctp_clog.x.misc.log3,
183 	    sctp_clog.x.misc.log4);
184 }
185 
186 void
187 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
188 {
189 	struct sctp_cwnd_log sctp_clog;
190 
191 	memset(&sctp_clog, 0, sizeof(sctp_clog));
192 	sctp_clog.x.map.base = map;
193 	sctp_clog.x.map.cum = cum;
194 	sctp_clog.x.map.high = high;
195 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
196 	    SCTP_LOG_EVENT_MAP,
197 	    from,
198 	    sctp_clog.x.misc.log1,
199 	    sctp_clog.x.misc.log2,
200 	    sctp_clog.x.misc.log3,
201 	    sctp_clog.x.misc.log4);
202 }
203 
204 void
205 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
206     int from)
207 {
208 	struct sctp_cwnd_log sctp_clog;
209 
210 	memset(&sctp_clog, 0, sizeof(sctp_clog));
211 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
212 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
213 	sctp_clog.x.fr.tsn = tsn;
214 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
215 	    SCTP_LOG_EVENT_FR,
216 	    from,
217 	    sctp_clog.x.misc.log1,
218 	    sctp_clog.x.misc.log2,
219 	    sctp_clog.x.misc.log3,
220 	    sctp_clog.x.misc.log4);
221 
222 }
223 
224 
225 void
226 sctp_log_mb(struct mbuf *m, int from)
227 {
228 	struct sctp_cwnd_log sctp_clog;
229 
230 	sctp_clog.x.mb.mp = m;
231 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
232 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
233 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
234 	if (SCTP_BUF_IS_EXTENDED(m)) {
235 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
236 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
237 	} else {
238 		sctp_clog.x.mb.ext = 0;
239 		sctp_clog.x.mb.refcnt = 0;
240 	}
241 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
242 	    SCTP_LOG_EVENT_MBUF,
243 	    from,
244 	    sctp_clog.x.misc.log1,
245 	    sctp_clog.x.misc.log2,
246 	    sctp_clog.x.misc.log3,
247 	    sctp_clog.x.misc.log4);
248 }
249 
250 
251 void
252 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
253     int from)
254 {
255 	struct sctp_cwnd_log sctp_clog;
256 
257 	if (control == NULL) {
258 		SCTP_PRINTF("Gak log of NULL?\n");
259 		return;
260 	}
261 	sctp_clog.x.strlog.stcb = control->stcb;
262 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
263 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
264 	sctp_clog.x.strlog.strm = control->sinfo_stream;
265 	if (poschk != NULL) {
266 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
267 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
268 	} else {
269 		sctp_clog.x.strlog.e_tsn = 0;
270 		sctp_clog.x.strlog.e_sseq = 0;
271 	}
272 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
273 	    SCTP_LOG_EVENT_STRM,
274 	    from,
275 	    sctp_clog.x.misc.log1,
276 	    sctp_clog.x.misc.log2,
277 	    sctp_clog.x.misc.log3,
278 	    sctp_clog.x.misc.log4);
279 
280 }
281 
282 void
283 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
284 {
285 	struct sctp_cwnd_log sctp_clog;
286 
287 	sctp_clog.x.cwnd.net = net;
288 	if (stcb->asoc.send_queue_cnt > 255)
289 		sctp_clog.x.cwnd.cnt_in_send = 255;
290 	else
291 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
292 	if (stcb->asoc.stream_queue_cnt > 255)
293 		sctp_clog.x.cwnd.cnt_in_str = 255;
294 	else
295 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
296 
297 	if (net) {
298 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
299 		sctp_clog.x.cwnd.inflight = net->flight_size;
300 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
301 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
302 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
303 	}
304 	if (SCTP_CWNDLOG_PRESEND == from) {
305 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
306 	}
307 	sctp_clog.x.cwnd.cwnd_augment = augment;
308 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
309 	    SCTP_LOG_EVENT_CWND,
310 	    from,
311 	    sctp_clog.x.misc.log1,
312 	    sctp_clog.x.misc.log2,
313 	    sctp_clog.x.misc.log3,
314 	    sctp_clog.x.misc.log4);
315 
316 }
317 
318 void
319 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
320 {
321 	struct sctp_cwnd_log sctp_clog;
322 
323 	memset(&sctp_clog, 0, sizeof(sctp_clog));
324 	if (inp) {
325 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
326 
327 	} else {
328 		sctp_clog.x.lock.sock = (void *)NULL;
329 	}
330 	sctp_clog.x.lock.inp = (void *)inp;
331 	if (stcb) {
332 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
333 	} else {
334 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
335 	}
336 	if (inp) {
337 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
338 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
339 	} else {
340 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
341 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
342 	}
343 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
344 	if (inp->sctp_socket) {
345 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
346 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
347 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
348 	} else {
349 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
350 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
351 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
352 	}
353 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
354 	    SCTP_LOG_LOCK_EVENT,
355 	    from,
356 	    sctp_clog.x.misc.log1,
357 	    sctp_clog.x.misc.log2,
358 	    sctp_clog.x.misc.log3,
359 	    sctp_clog.x.misc.log4);
360 
361 }
362 
363 void
364 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
365 {
366 	struct sctp_cwnd_log sctp_clog;
367 
368 	memset(&sctp_clog, 0, sizeof(sctp_clog));
369 	sctp_clog.x.cwnd.net = net;
370 	sctp_clog.x.cwnd.cwnd_new_value = error;
371 	sctp_clog.x.cwnd.inflight = net->flight_size;
372 	sctp_clog.x.cwnd.cwnd_augment = burst;
373 	if (stcb->asoc.send_queue_cnt > 255)
374 		sctp_clog.x.cwnd.cnt_in_send = 255;
375 	else
376 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
377 	if (stcb->asoc.stream_queue_cnt > 255)
378 		sctp_clog.x.cwnd.cnt_in_str = 255;
379 	else
380 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
381 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
382 	    SCTP_LOG_EVENT_MAXBURST,
383 	    from,
384 	    sctp_clog.x.misc.log1,
385 	    sctp_clog.x.misc.log2,
386 	    sctp_clog.x.misc.log3,
387 	    sctp_clog.x.misc.log4);
388 
389 }
390 
391 void
392 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
393 {
394 	struct sctp_cwnd_log sctp_clog;
395 
396 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
397 	sctp_clog.x.rwnd.send_size = snd_size;
398 	sctp_clog.x.rwnd.overhead = overhead;
399 	sctp_clog.x.rwnd.new_rwnd = 0;
400 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
401 	    SCTP_LOG_EVENT_RWND,
402 	    from,
403 	    sctp_clog.x.misc.log1,
404 	    sctp_clog.x.misc.log2,
405 	    sctp_clog.x.misc.log3,
406 	    sctp_clog.x.misc.log4);
407 }
408 
409 void
410 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
411 {
412 	struct sctp_cwnd_log sctp_clog;
413 
414 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
415 	sctp_clog.x.rwnd.send_size = flight_size;
416 	sctp_clog.x.rwnd.overhead = overhead;
417 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
418 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
419 	    SCTP_LOG_EVENT_RWND,
420 	    from,
421 	    sctp_clog.x.misc.log1,
422 	    sctp_clog.x.misc.log2,
423 	    sctp_clog.x.misc.log3,
424 	    sctp_clog.x.misc.log4);
425 }
426 
427 void
428 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
429 {
430 	struct sctp_cwnd_log sctp_clog;
431 
432 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
433 	sctp_clog.x.mbcnt.size_change = book;
434 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
435 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
436 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
437 	    SCTP_LOG_EVENT_MBCNT,
438 	    from,
439 	    sctp_clog.x.misc.log1,
440 	    sctp_clog.x.misc.log2,
441 	    sctp_clog.x.misc.log3,
442 	    sctp_clog.x.misc.log4);
443 
444 }
445 
/*
 * Log four caller-supplied 32-bit values as a miscellaneous trace event
 * (SCTP_LOG_MISC_EVENT).  No interpretation is applied; meaning is
 * defined entirely by the 'from' location code.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
454 
455 void
456 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
457 {
458 	struct sctp_cwnd_log sctp_clog;
459 
460 	sctp_clog.x.wake.stcb = (void *)stcb;
461 	sctp_clog.x.wake.wake_cnt = wake_cnt;
462 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
463 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
464 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
465 
466 	if (stcb->asoc.stream_queue_cnt < 0xff)
467 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
468 	else
469 		sctp_clog.x.wake.stream_qcnt = 0xff;
470 
471 	if (stcb->asoc.chunks_on_out_queue < 0xff)
472 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
473 	else
474 		sctp_clog.x.wake.chunks_on_oque = 0xff;
475 
476 	sctp_clog.x.wake.sctpflags = 0;
477 	/* set in the defered mode stuff */
478 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
479 		sctp_clog.x.wake.sctpflags |= 1;
480 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
481 		sctp_clog.x.wake.sctpflags |= 2;
482 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
483 		sctp_clog.x.wake.sctpflags |= 4;
484 	/* what about the sb */
485 	if (stcb->sctp_socket) {
486 		struct socket *so = stcb->sctp_socket;
487 
488 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
489 	} else {
490 		sctp_clog.x.wake.sbflags = 0xff;
491 	}
492 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
493 	    SCTP_LOG_EVENT_WAKE,
494 	    from,
495 	    sctp_clog.x.misc.log1,
496 	    sctp_clog.x.misc.log2,
497 	    sctp_clog.x.misc.log3,
498 	    sctp_clog.x.misc.log4);
499 
500 }
501 
502 void
503 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
504 {
505 	struct sctp_cwnd_log sctp_clog;
506 
507 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
508 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
509 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
510 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
511 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
512 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
513 	sctp_clog.x.blk.sndlen = sendlen;
514 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
515 	    SCTP_LOG_EVENT_BLOCK,
516 	    from,
517 	    sctp_clog.x.misc.log1,
518 	    sctp_clog.x.misc.log2,
519 	    sctp_clog.x.misc.log3,
520 	    sctp_clog.x.misc.log4);
521 
522 }
523 
/*
 * Stub for exporting the trace log via a socket option.  Intentionally a
 * no-op: the trace data is expected to be retrieved with the kernel
 * tracing tools instead.  Always returns 0 (success); optval/optsize are
 * ignored.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
530 
531 #ifdef SCTP_AUDITING_ENABLED
/* Circular trail of (event, detail) byte pairs recorded by the audit code. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next slot to write in sctp_audit_data; wraps to 0 at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
534 
535 static
536 void
537 sctp_print_audit_report(void)
538 {
539 	int i;
540 	int cnt;
541 
542 	cnt = 0;
543 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
544 		if ((sctp_audit_data[i][0] == 0xe0) &&
545 		    (sctp_audit_data[i][1] == 0x01)) {
546 			cnt = 0;
547 			SCTP_PRINTF("\n");
548 		} else if (sctp_audit_data[i][0] == 0xf0) {
549 			cnt = 0;
550 			SCTP_PRINTF("\n");
551 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
552 		    (sctp_audit_data[i][1] == 0x01)) {
553 			SCTP_PRINTF("\n");
554 			cnt = 0;
555 		}
556 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
557 		    (uint32_t) sctp_audit_data[i][1]);
558 		cnt++;
559 		if ((cnt % 14) == 0)
560 			SCTP_PRINTF("\n");
561 	}
562 	for (i = 0; i < sctp_audit_indx; i++) {
563 		if ((sctp_audit_data[i][0] == 0xe0) &&
564 		    (sctp_audit_data[i][1] == 0x01)) {
565 			cnt = 0;
566 			SCTP_PRINTF("\n");
567 		} else if (sctp_audit_data[i][0] == 0xf0) {
568 			cnt = 0;
569 			SCTP_PRINTF("\n");
570 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
571 		    (sctp_audit_data[i][1] == 0x01)) {
572 			SCTP_PRINTF("\n");
573 			cnt = 0;
574 		}
575 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
576 		    (uint32_t) sctp_audit_data[i][1]);
577 		cnt++;
578 		if ((cnt % 14) == 0)
579 			SCTP_PRINTF("\n");
580 	}
581 	SCTP_PRINTF("\n");
582 }
583 
584 void
585 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
586     struct sctp_nets *net)
587 {
588 	int resend_cnt, tot_out, rep, tot_book_cnt;
589 	struct sctp_nets *lnet;
590 	struct sctp_tmit_chunk *chk;
591 
592 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
593 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
594 	sctp_audit_indx++;
595 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
596 		sctp_audit_indx = 0;
597 	}
598 	if (inp == NULL) {
599 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
600 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
601 		sctp_audit_indx++;
602 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
603 			sctp_audit_indx = 0;
604 		}
605 		return;
606 	}
607 	if (stcb == NULL) {
608 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
609 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
610 		sctp_audit_indx++;
611 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
612 			sctp_audit_indx = 0;
613 		}
614 		return;
615 	}
616 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
617 	sctp_audit_data[sctp_audit_indx][1] =
618 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
619 	sctp_audit_indx++;
620 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
621 		sctp_audit_indx = 0;
622 	}
623 	rep = 0;
624 	tot_book_cnt = 0;
625 	resend_cnt = tot_out = 0;
626 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
627 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
628 			resend_cnt++;
629 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
630 			tot_out += chk->book_size;
631 			tot_book_cnt++;
632 		}
633 	}
634 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
635 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
636 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
637 		sctp_audit_indx++;
638 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
639 			sctp_audit_indx = 0;
640 		}
641 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
642 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
643 		rep = 1;
644 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
645 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
646 		sctp_audit_data[sctp_audit_indx][1] =
647 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
648 		sctp_audit_indx++;
649 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
650 			sctp_audit_indx = 0;
651 		}
652 	}
653 	if (tot_out != stcb->asoc.total_flight) {
654 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
655 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
656 		sctp_audit_indx++;
657 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
658 			sctp_audit_indx = 0;
659 		}
660 		rep = 1;
661 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
662 		    (int)stcb->asoc.total_flight);
663 		stcb->asoc.total_flight = tot_out;
664 	}
665 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
666 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
667 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
668 		sctp_audit_indx++;
669 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
670 			sctp_audit_indx = 0;
671 		}
672 		rep = 1;
673 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
674 
675 		stcb->asoc.total_flight_count = tot_book_cnt;
676 	}
677 	tot_out = 0;
678 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
679 		tot_out += lnet->flight_size;
680 	}
681 	if (tot_out != stcb->asoc.total_flight) {
682 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
683 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
684 		sctp_audit_indx++;
685 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
686 			sctp_audit_indx = 0;
687 		}
688 		rep = 1;
689 		SCTP_PRINTF("real flight:%d net total was %d\n",
690 		    stcb->asoc.total_flight, tot_out);
691 		/* now corrective action */
692 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
693 
694 			tot_out = 0;
695 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
696 				if ((chk->whoTo == lnet) &&
697 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
698 					tot_out += chk->book_size;
699 				}
700 			}
701 			if (lnet->flight_size != tot_out) {
702 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
703 				    (uint32_t) lnet, lnet->flight_size,
704 				    tot_out);
705 				lnet->flight_size = tot_out;
706 			}
707 		}
708 	}
709 	if (rep) {
710 		sctp_print_audit_report();
711 	}
712 }
713 
714 void
715 sctp_audit_log(uint8_t ev, uint8_t fd)
716 {
717 
718 	sctp_audit_data[sctp_audit_indx][0] = ev;
719 	sctp_audit_data[sctp_audit_indx][1] = fd;
720 	sctp_audit_indx++;
721 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
722 		sctp_audit_indx = 0;
723 	}
724 }
725 
726 #endif
727 
728 /*
729  * a list of sizes based on typical mtu's, used only if next hop size not
730  * returned.
731  */
/*
 * Ascending table of common link MTU values, indexed by
 * find_next_best_mtu().  Must contain exactly NUMBER_OF_MTU_SIZES (18)
 * entries.  Values appear to correspond to typical media MTUs (e.g.
 * 576 min IPv4 reassembly, 1492 PPPoE, 1500 Ethernet, 9000-class jumbo
 * omitted) - presumably derived from RFC 1191's plateau table; confirm
 * before extending.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
752 
/*
 * Stop every timer that must not fire once an association has entered
 * shutdown: the association-wide heartbeat, delayed-ack, stream-reset,
 * ASCONF, autoclose and delayed-event timers, plus the fast-retransmit
 * and path-MTU timers of each destination address.  Return values of
 * SCTP_OS_TIMER_STOP are deliberately ignored - a timer that was not
 * running is fine here.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	/* Association-scoped timers. */
	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	/* Per-destination timers. */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
	}
}
772 
773 int
774 find_next_best_mtu(int totsz)
775 {
776 	int i, perfer;
777 
778 	/*
779 	 * if we are in here we must find the next best fit based on the
780 	 * size of the dg that failed to be sent.
781 	 */
782 	perfer = 0;
783 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
784 		if (totsz < sctp_mtu_sizes[i]) {
785 			perfer = i - 1;
786 			if (perfer < 0)
787 				perfer = 0;
788 			break;
789 		}
790 	}
791 	return (sctp_mtu_sizes[perfer]);
792 }
793 
/*
 * Refill the endpoint's random-number store by HMACing the endpoint's
 * random seed with a monotonically increasing counter, then reset the
 * read offset (store_at) to the start of the refreshed buffer.
 * Deliberately unlocked - see the comment below for why a race here is
 * acceptable.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
812 
/*
 * Draw a 32-bit value from the endpoint's random store for use as an
 * initial TSN (or verification tag, via sctp_select_a_tag).  A slot is
 * claimed lock-free with an atomic compare-and-set on the read offset;
 * when the offset wraps, the store is refilled first.  If the sysctl
 * debug sequence (initial_sequence_debug) is set, a simple incrementing
 * counter is returned instead so traces are reproducible.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		/* Deterministic mode: hand out sequential values. */
		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before the last partial word of the signature buffer. */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* Claim [store_at, new_store); on contention another thread won. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
850 
851 uint32_t
852 sctp_select_a_tag(struct sctp_inpcb *inp, int save_in_twait)
853 {
854 	u_long x, not_done;
855 	struct timeval now;
856 
857 	(void)SCTP_GETTIME_TIMEVAL(&now);
858 	not_done = 1;
859 	while (not_done) {
860 		x = sctp_select_initial_TSN(&inp->sctp_ep);
861 		if (x == 0) {
862 			/* we never use 0 */
863 			continue;
864 		}
865 		if (sctp_is_vtag_good(inp, x, &now, save_in_twait)) {
866 			not_done = 0;
867 		}
868 	}
869 	return (x);
870 }
871 
872 int
873 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
874     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
875 {
876 	struct sctp_association *asoc;
877 
878 	/*
879 	 * Anything set to zero is taken care of by the allocation routine's
880 	 * bzero
881 	 */
882 
883 	/*
884 	 * Up front select what scoping to apply on addresses I tell my peer
885 	 * Not sure what to do with these right now, we will need to come up
886 	 * with a way to set them. We may need to pass them through from the
887 	 * caller in the sctp_aloc_assoc() function.
888 	 */
889 	int i;
890 
891 	asoc = &stcb->asoc;
892 	/* init all variables to a known value. */
893 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
894 	asoc->max_burst = m->sctp_ep.max_burst;
895 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
896 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
897 	asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
898 	/* JRS 5/21/07 - Init CMT PF variables */
899 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
900 	asoc->sctp_frag_point = m->sctp_frag_point;
901 #ifdef INET
902 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
903 #else
904 	asoc->default_tos = 0;
905 #endif
906 
907 #ifdef INET6
908 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
909 #else
910 	asoc->default_flowlabel = 0;
911 #endif
912 	asoc->sb_send_resv = 0;
913 	if (override_tag) {
914 		struct timeval now;
915 
916 		(void)SCTP_GETTIME_TIMEVAL(&now);
917 		if (sctp_is_in_timewait(override_tag)) {
918 			/*
919 			 * It must be in the time-wait hash, we put it there
920 			 * when we aloc one. If not the peer is playing
921 			 * games.
922 			 */
923 			asoc->my_vtag = override_tag;
924 		} else {
925 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
926 			panic("Huh is_in_timewait fails");
927 			return (ENOMEM);
928 		}
929 
930 	} else {
931 		asoc->my_vtag = sctp_select_a_tag(m, 1);
932 	}
933 	/* Get the nonce tags */
934 	asoc->my_vtag_nonce = sctp_select_a_tag(m, 0);
935 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, 0);
936 	asoc->vrf_id = vrf_id;
937 
938 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
939 		asoc->hb_is_disabled = 1;
940 	else
941 		asoc->hb_is_disabled = 0;
942 
943 #ifdef SCTP_ASOCLOG_OF_TSNS
944 	asoc->tsn_in_at = 0;
945 	asoc->tsn_out_at = 0;
946 	asoc->tsn_in_wrapped = 0;
947 	asoc->tsn_out_wrapped = 0;
948 	asoc->cumack_log_at = 0;
949 	asoc->cumack_log_atsnt = 0;
950 #endif
951 #ifdef SCTP_FS_SPEC_LOG
952 	asoc->fs_index = 0;
953 #endif
954 	asoc->refcnt = 0;
955 	asoc->assoc_up_sent = 0;
956 	asoc->assoc_id = asoc->my_vtag;
957 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
958 	    sctp_select_initial_TSN(&m->sctp_ep);
959 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
960 	/* we are optimisitic here */
961 	asoc->peer_supports_pktdrop = 1;
962 
963 	asoc->sent_queue_retran_cnt = 0;
964 
965 	/* for CMT */
966 	asoc->last_net_data_came_from = NULL;
967 
968 	/* This will need to be adjusted */
969 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
970 	asoc->last_acked_seq = asoc->init_seq_number - 1;
971 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
972 	asoc->asconf_seq_in = asoc->last_acked_seq;
973 
974 	/* here we are different, we hold the next one we expect */
975 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
976 
977 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
978 	asoc->initial_rto = m->sctp_ep.initial_rto;
979 
980 	asoc->max_init_times = m->sctp_ep.max_init_times;
981 	asoc->max_send_times = m->sctp_ep.max_send_times;
982 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
983 	asoc->free_chunk_cnt = 0;
984 
985 	asoc->iam_blocking = 0;
986 	/* ECN Nonce initialization */
987 	asoc->context = m->sctp_context;
988 	asoc->def_send = m->def_send;
989 	asoc->ecn_nonce_allowed = 0;
990 	asoc->receiver_nonce_sum = 1;
991 	asoc->nonce_sum_expect_base = 1;
992 	asoc->nonce_sum_check = 1;
993 	asoc->nonce_resync_tsn = 0;
994 	asoc->nonce_wait_for_ecne = 0;
995 	asoc->nonce_wait_tsn = 0;
996 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
997 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
998 	asoc->pr_sctp_cnt = 0;
999 	asoc->total_output_queue_size = 0;
1000 
1001 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1002 		struct in6pcb *inp6;
1003 
1004 		/* Its a V6 socket */
1005 		inp6 = (struct in6pcb *)m;
1006 		asoc->ipv6_addr_legal = 1;
1007 		/* Now look at the binding flag to see if V4 will be legal */
1008 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1009 			asoc->ipv4_addr_legal = 1;
1010 		} else {
1011 			/* V4 addresses are NOT legal on the association */
1012 			asoc->ipv4_addr_legal = 0;
1013 		}
1014 	} else {
1015 		/* Its a V4 socket, no - V6 */
1016 		asoc->ipv4_addr_legal = 1;
1017 		asoc->ipv6_addr_legal = 0;
1018 	}
1019 
1020 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1021 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1022 
1023 	asoc->smallest_mtu = m->sctp_frag_point;
1024 #ifdef SCTP_PRINT_FOR_B_AND_M
1025 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1026 	    asoc->smallest_mtu);
1027 #endif
1028 	asoc->minrto = m->sctp_ep.sctp_minrto;
1029 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1030 
1031 	asoc->locked_on_sending = NULL;
1032 	asoc->stream_locked_on = 0;
1033 	asoc->ecn_echo_cnt_onq = 0;
1034 	asoc->stream_locked = 0;
1035 
1036 	asoc->send_sack = 1;
1037 
1038 	LIST_INIT(&asoc->sctp_restricted_addrs);
1039 
1040 	TAILQ_INIT(&asoc->nets);
1041 	TAILQ_INIT(&asoc->pending_reply_queue);
1042 	TAILQ_INIT(&asoc->asconf_ack_sent);
1043 	/* Setup to fill the hb random cache at first HB */
1044 	asoc->hb_random_idx = 4;
1045 
1046 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1047 
1048 	/*
1049 	 * JRS - Pick the default congestion control module based on the
1050 	 * sysctl.
1051 	 */
1052 	switch (m->sctp_ep.sctp_default_cc_module) {
1053 		/* JRS - Standard TCP congestion control */
1054 	case SCTP_CC_RFC2581:
1055 		{
1056 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1057 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1058 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1059 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1060 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1061 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1063 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1064 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1065 			break;
1066 		}
1067 		/* JRS - High Speed TCP congestion control (Floyd) */
1068 	case SCTP_CC_HSTCP:
1069 		{
1070 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1071 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1072 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1073 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1074 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1075 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1077 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1078 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1079 			break;
1080 		}
1081 		/* JRS - HTCP congestion control */
1082 	case SCTP_CC_HTCP:
1083 		{
1084 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1085 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1086 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1087 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1088 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1089 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1091 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1092 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1093 			break;
1094 		}
1095 		/* JRS - By default, use RFC2581 */
1096 	default:
1097 		{
1098 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1099 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1100 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1101 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1102 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1103 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1104 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1105 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1106 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1107 			break;
1108 		}
1109 	}
1110 
1111 	/*
1112 	 * Now the stream parameters, here we allocate space for all streams
1113 	 * that we request by default.
1114 	 */
1115 	asoc->streamoutcnt = asoc->pre_open_streams =
1116 	    m->sctp_ep.pre_open_stream_count;
1117 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1118 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1119 	    SCTP_M_STRMO);
1120 	if (asoc->strmout == NULL) {
1121 		/* big trouble no memory */
1122 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1123 		return (ENOMEM);
1124 	}
1125 	for (i = 0; i < asoc->streamoutcnt; i++) {
1126 		/*
1127 		 * inbound side must be set to 0xffff, also NOTE when we get
1128 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1129 		 * count (streamoutcnt) but first check if we sent to any of
1130 		 * the upper streams that were dropped (if some were). Those
1131 		 * that were dropped must be notified to the upper layer as
1132 		 * failed to send.
1133 		 */
1134 		asoc->strmout[i].next_sequence_sent = 0x0;
1135 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1136 		asoc->strmout[i].stream_no = i;
1137 		asoc->strmout[i].last_msg_incomplete = 0;
1138 		asoc->strmout[i].next_spoke.tqe_next = 0;
1139 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1140 	}
1141 	/* Now the mapping array */
1142 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1143 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1144 	    SCTP_M_MAP);
1145 	if (asoc->mapping_array == NULL) {
1146 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1147 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1148 		return (ENOMEM);
1149 	}
1150 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1151 	/* Now the init of the other outqueues */
1152 	TAILQ_INIT(&asoc->free_chunks);
1153 	TAILQ_INIT(&asoc->out_wheel);
1154 	TAILQ_INIT(&asoc->control_send_queue);
1155 	TAILQ_INIT(&asoc->asconf_send_queue);
1156 	TAILQ_INIT(&asoc->send_queue);
1157 	TAILQ_INIT(&asoc->sent_queue);
1158 	TAILQ_INIT(&asoc->reasmqueue);
1159 	TAILQ_INIT(&asoc->resetHead);
1160 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1161 	TAILQ_INIT(&asoc->asconf_queue);
1162 	/* authentication fields */
1163 	asoc->authinfo.random = NULL;
1164 	asoc->authinfo.assoc_key = NULL;
1165 	asoc->authinfo.assoc_keyid = 0;
1166 	asoc->authinfo.recv_key = NULL;
1167 	asoc->authinfo.recv_keyid = 0;
1168 	LIST_INIT(&asoc->shared_keys);
1169 	asoc->marked_retrans = 0;
1170 	asoc->timoinit = 0;
1171 	asoc->timodata = 0;
1172 	asoc->timosack = 0;
1173 	asoc->timoshutdown = 0;
1174 	asoc->timoheartbeat = 0;
1175 	asoc->timocookie = 0;
1176 	asoc->timoshutdownack = 0;
1177 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1178 	asoc->discontinuity_time = asoc->start_time;
1179 	/*
1180 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1181 	 * freed later whe the association is freed.
1182 	 */
1183 	return (0);
1184 }
1185 
1186 int
1187 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1188 {
1189 	/* mapping array needs to grow */
1190 	uint8_t *new_array;
1191 	uint32_t new_size;
1192 
1193 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1194 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1195 	if (new_array == NULL) {
1196 		/* can't get more, forget it */
1197 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1198 		    new_size);
1199 		return (-1);
1200 	}
1201 	memset(new_array, 0, new_size);
1202 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1203 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1204 	asoc->mapping_array = new_array;
1205 	asoc->mapping_array_size = new_size;
1206 	return (0);
1207 }
1208 
1209 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Perform one iterator request: walk endpoints (and their
 * associations) starting at it->inp, invoking the iterator's
 * per-endpoint (function_inp), per-association (function_assoc) and
 * end-of-endpoint (function_inp_end) callbacks, then the final
 * function_atend callback, and free the iterator when the walk is
 * complete.  Endpoints whose sctp_flags/sctp_features do not contain
 * the iterator's pcb_flags/pcb_features filter bits are skipped.
 * Acquires SCTP_ITERATOR_LOCK on entry and releases it only when the
 * iterator finishes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* drop the reference held on the current endpoint */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			/* single-endpoint walk: a mismatch means we are done */
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/* switch from the write lock to a read lock for the assoc walk */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		/* per-endpoint callback fires once per endpoint */
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* no assocs (or callback asked to skip them) on this ep */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			/*
			 * Hold refs on the stcb and inp so they survive
			 * while all locks are briefly released.
			 */
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	/*
	 * NOTE(review): write-lock/unlock pair with an empty critical
	 * section -- presumably a barrier to let pending writers on this
	 * endpoint drain before we move on; confirm the intent.
	 */
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1336 
1337 void
1338 sctp_iterator_worker(void)
1339 {
1340 	struct sctp_iterator *it = NULL;
1341 
1342 	/* This function is called with the WQ lock in place */
1343 
1344 	SCTP_BASE_INFO(iterator_running) = 1;
1345 again:
1346 	it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1347 	while (it) {
1348 		/* now lets work on this one */
1349 		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
1350 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1351 		sctp_iterator_work(it);
1352 		SCTP_IPI_ITERATOR_WQ_LOCK();
1353 		/* sa_ignore FREED_MEMORY */
1354 		it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1355 	}
1356 	if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
1357 		goto again;
1358 	}
1359 	SCTP_BASE_INFO(iterator_running) = 0;
1360 	return;
1361 }
1362 
1363 #endif
1364 
1365 
1366 static void
1367 sctp_handle_addr_wq(void)
1368 {
1369 	/* deal with the ADDR wq from the rtsock calls */
1370 	struct sctp_laddr *wi;
1371 	struct sctp_asconf_iterator *asc;
1372 
1373 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1374 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1375 	if (asc == NULL) {
1376 		/* Try later, no memory */
1377 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1378 		    (struct sctp_inpcb *)NULL,
1379 		    (struct sctp_tcb *)NULL,
1380 		    (struct sctp_nets *)NULL);
1381 		return;
1382 	}
1383 	LIST_INIT(&asc->list_of_work);
1384 	asc->cnt = 0;
1385 	SCTP_IPI_ITERATOR_WQ_LOCK();
1386 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1387 	while (wi != NULL) {
1388 		LIST_REMOVE(wi, sctp_nxt_addr);
1389 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1390 		asc->cnt++;
1391 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1392 	}
1393 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1394 	if (asc->cnt == 0) {
1395 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1396 	} else {
1397 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1398 		    sctp_asconf_iterator_stcb,
1399 		    NULL,	/* No ep end for boundall */
1400 		    SCTP_PCB_FLAGS_BOUNDALL,
1401 		    SCTP_PCB_ANY_FEATURES,
1402 		    SCTP_ASOC_ANY_STATE,
1403 		    (void *)asc, 0,
1404 		    sctp_asconf_iterator_end, NULL, 0);
1405 	}
1406 }
1407 
/*
 * NOTE(review): these file-scope globals are only written (never read
 * across calls) by sctp_timeout_handler() below -- they look like
 * debug leftovers and could presumably be function locals, or at
 * least static; confirm no other compilation unit declares them
 * extern before changing the linkage.
 */
int retcode = 0;
int cur_oerr = 0;
1410 
/*
 * Main callout handler for every SCTP timer type.  The opaque
 * argument is the struct sctp_timer embedded in the owning object
 * (endpoint, association, destination net or iterator, depending on
 * the type).  The routine sanity-checks the timer, pins the inp/stcb
 * with reference counts, takes the TCB lock, dispatches on tmr->type
 * and finally drops the references/locks it acquired.  The
 * tmr->stopped_from assignments (0xa001..0xa006) are progress
 * markers, presumably for post-mortem debugging.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;
	struct sctp_iterator *it = NULL;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		return;
	}
	tmr->stopped_from = 0xa002;
	/* every type except ADDR_WQ requires an endpoint */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
		it = (struct sctp_iterator *)inp;
		inp = NULL;
	}
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * Bail if the socket is gone, unless this is one of the
		 * teardown-related timers that must still run.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* pin the association while we work on it */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* timer was stopped while we were getting here */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/* lock held now, so the temporary ref can go */
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		SCTP_STAT_INCR(sctps_timoiterator);
		sctp_iterator_timer(it);
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		/* stores into the file-scope debug globals above */
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed-SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			int abort_flag;

			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
				sctp_sack_check(stcb, 0, 0, &abort_flag);
			sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* count reachable-but-unconfirmed destinations */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			/*
			 * NOTE(review): lnet is NULL here after the full
			 * list walk -- confirm sctp_heartbeat_timer() and
			 * sctp_auditing() handle a NULL net as intended.
			 */
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* rotate the endpoint's secret keys */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* lock-order: take the socket lock before the TCB lock */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_DIRECTLY_NOCMPSET);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};	/* NOTE(review): stray ";" after switch -- harmless empty statement */
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
}
1876 
1877 void
1878 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1879     struct sctp_nets *net)
1880 {
1881 	int to_ticks;
1882 	struct sctp_timer *tmr;
1883 
1884 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1885 		return;
1886 
1887 	to_ticks = 0;
1888 
1889 	tmr = NULL;
1890 	if (stcb) {
1891 		SCTP_TCB_LOCK_ASSERT(stcb);
1892 	}
1893 	switch (t_type) {
1894 	case SCTP_TIMER_TYPE_ZERO_COPY:
1895 		tmr = &inp->sctp_ep.zero_copy_timer;
1896 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1897 		break;
1898 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1899 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1900 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1901 		break;
1902 	case SCTP_TIMER_TYPE_ADDR_WQ:
1903 		/* Only 1 tick away :-) */
1904 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1905 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1906 		break;
1907 	case SCTP_TIMER_TYPE_ITERATOR:
1908 		{
1909 			struct sctp_iterator *it;
1910 
1911 			it = (struct sctp_iterator *)inp;
1912 			tmr = &it->tmr;
1913 			to_ticks = SCTP_ITERATOR_TICKS;
1914 		}
1915 		break;
1916 	case SCTP_TIMER_TYPE_SEND:
1917 		/* Here we use the RTO timer */
1918 		{
1919 			int rto_val;
1920 
1921 			if ((stcb == NULL) || (net == NULL)) {
1922 				return;
1923 			}
1924 			tmr = &net->rxt_timer;
1925 			if (net->RTO == 0) {
1926 				rto_val = stcb->asoc.initial_rto;
1927 			} else {
1928 				rto_val = net->RTO;
1929 			}
1930 			to_ticks = MSEC_TO_TICKS(rto_val);
1931 		}
1932 		break;
1933 	case SCTP_TIMER_TYPE_INIT:
1934 		/*
1935 		 * Here we use the INIT timer default usually about 1
1936 		 * minute.
1937 		 */
1938 		if ((stcb == NULL) || (net == NULL)) {
1939 			return;
1940 		}
1941 		tmr = &net->rxt_timer;
1942 		if (net->RTO == 0) {
1943 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1944 		} else {
1945 			to_ticks = MSEC_TO_TICKS(net->RTO);
1946 		}
1947 		break;
1948 	case SCTP_TIMER_TYPE_RECV:
1949 		/*
1950 		 * Here we use the Delayed-Ack timer value from the inp
1951 		 * ususually about 200ms.
1952 		 */
1953 		if (stcb == NULL) {
1954 			return;
1955 		}
1956 		tmr = &stcb->asoc.dack_timer;
1957 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1958 		break;
1959 	case SCTP_TIMER_TYPE_SHUTDOWN:
1960 		/* Here we use the RTO of the destination. */
1961 		if ((stcb == NULL) || (net == NULL)) {
1962 			return;
1963 		}
1964 		if (net->RTO == 0) {
1965 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1966 		} else {
1967 			to_ticks = MSEC_TO_TICKS(net->RTO);
1968 		}
1969 		tmr = &net->rxt_timer;
1970 		break;
1971 	case SCTP_TIMER_TYPE_HEARTBEAT:
1972 		/*
1973 		 * the net is used here so that we can add in the RTO. Even
1974 		 * though we use a different timer. We also add the HB timer
1975 		 * PLUS a random jitter.
1976 		 */
1977 		if ((inp == NULL) || (stcb == NULL)) {
1978 			return;
1979 		} else {
1980 			uint32_t rndval;
1981 			uint8_t this_random;
1982 			int cnt_of_unconf = 0;
1983 			struct sctp_nets *lnet;
1984 
1985 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1986 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1987 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1988 					cnt_of_unconf++;
1989 				}
1990 			}
1991 			if (cnt_of_unconf) {
1992 				net = lnet = NULL;
1993 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
1994 			}
1995 			if (stcb->asoc.hb_random_idx > 3) {
1996 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1997 				memcpy(stcb->asoc.hb_random_values, &rndval,
1998 				    sizeof(stcb->asoc.hb_random_values));
1999 				stcb->asoc.hb_random_idx = 0;
2000 			}
2001 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
2002 			stcb->asoc.hb_random_idx++;
2003 			stcb->asoc.hb_ect_randombit = 0;
2004 			/*
2005 			 * this_random will be 0 - 256 ms RTO is in ms.
2006 			 */
2007 			if ((stcb->asoc.hb_is_disabled) &&
2008 			    (cnt_of_unconf == 0)) {
2009 				return;
2010 			}
2011 			if (net) {
2012 				int delay;
2013 
2014 				delay = stcb->asoc.heart_beat_delay;
2015 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2016 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2017 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2018 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2019 						delay = 0;
2020 					}
2021 				}
2022 				if (net->RTO == 0) {
2023 					/* Never been checked */
2024 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2025 				} else {
2026 					/* set rto_val to the ms */
2027 					to_ticks = delay + net->RTO + this_random;
2028 				}
2029 			} else {
2030 				if (cnt_of_unconf) {
2031 					to_ticks = this_random + stcb->asoc.initial_rto;
2032 				} else {
2033 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2034 				}
2035 			}
2036 			/*
2037 			 * Now we must convert the to_ticks that are now in
2038 			 * ms to ticks.
2039 			 */
2040 			to_ticks = MSEC_TO_TICKS(to_ticks);
2041 			tmr = &stcb->asoc.hb_timer;
2042 		}
2043 		break;
2044 	case SCTP_TIMER_TYPE_COOKIE:
2045 		/*
2046 		 * Here we can use the RTO timer from the network since one
2047 		 * RTT was compelete. If a retran happened then we will be
2048 		 * using the RTO initial value.
2049 		 */
2050 		if ((stcb == NULL) || (net == NULL)) {
2051 			return;
2052 		}
2053 		if (net->RTO == 0) {
2054 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2055 		} else {
2056 			to_ticks = MSEC_TO_TICKS(net->RTO);
2057 		}
2058 		tmr = &net->rxt_timer;
2059 		break;
2060 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2061 		/*
2062 		 * nothing needed but the endpoint here ususually about 60
2063 		 * minutes.
2064 		 */
2065 		if (inp == NULL) {
2066 			return;
2067 		}
2068 		tmr = &inp->sctp_ep.signature_change;
2069 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2070 		break;
2071 	case SCTP_TIMER_TYPE_ASOCKILL:
2072 		if (stcb == NULL) {
2073 			return;
2074 		}
2075 		tmr = &stcb->asoc.strreset_timer;
2076 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2077 		break;
2078 	case SCTP_TIMER_TYPE_INPKILL:
2079 		/*
2080 		 * The inp is setup to die. We re-use the signature_chage
2081 		 * timer since that has stopped and we are in the GONE
2082 		 * state.
2083 		 */
2084 		if (inp == NULL) {
2085 			return;
2086 		}
2087 		tmr = &inp->sctp_ep.signature_change;
2088 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2089 		break;
2090 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2091 		/*
2092 		 * Here we use the value found in the EP for PMTU ususually
2093 		 * about 10 minutes.
2094 		 */
2095 		if ((stcb == NULL) || (inp == NULL)) {
2096 			return;
2097 		}
2098 		if (net == NULL) {
2099 			return;
2100 		}
2101 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2102 		tmr = &net->pmtu_timer;
2103 		break;
2104 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2105 		/* Here we use the RTO of the destination */
2106 		if ((stcb == NULL) || (net == NULL)) {
2107 			return;
2108 		}
2109 		if (net->RTO == 0) {
2110 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2111 		} else {
2112 			to_ticks = MSEC_TO_TICKS(net->RTO);
2113 		}
2114 		tmr = &net->rxt_timer;
2115 		break;
2116 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2117 		/*
2118 		 * Here we use the endpoints shutdown guard timer usually
2119 		 * about 3 minutes.
2120 		 */
2121 		if ((inp == NULL) || (stcb == NULL)) {
2122 			return;
2123 		}
2124 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2125 		tmr = &stcb->asoc.shut_guard_timer;
2126 		break;
2127 	case SCTP_TIMER_TYPE_STRRESET:
2128 		/*
2129 		 * Here the timer comes from the stcb but its value is from
2130 		 * the net's RTO.
2131 		 */
2132 		if ((stcb == NULL) || (net == NULL)) {
2133 			return;
2134 		}
2135 		if (net->RTO == 0) {
2136 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2137 		} else {
2138 			to_ticks = MSEC_TO_TICKS(net->RTO);
2139 		}
2140 		tmr = &stcb->asoc.strreset_timer;
2141 		break;
2142 
2143 	case SCTP_TIMER_TYPE_EARLYFR:
2144 		{
2145 			unsigned int msec;
2146 
2147 			if ((stcb == NULL) || (net == NULL)) {
2148 				return;
2149 			}
2150 			if (net->flight_size > net->cwnd) {
2151 				/* no need to start */
2152 				return;
2153 			}
2154 			SCTP_STAT_INCR(sctps_earlyfrstart);
2155 			if (net->lastsa == 0) {
2156 				/* Hmm no rtt estimate yet? */
2157 				msec = stcb->asoc.initial_rto >> 2;
2158 			} else {
2159 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2160 			}
2161 			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
2162 				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
2163 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2164 					msec = SCTP_MINFR_MSEC_FLOOR;
2165 				}
2166 			}
2167 			to_ticks = MSEC_TO_TICKS(msec);
2168 			tmr = &net->fr_timer;
2169 		}
2170 		break;
2171 	case SCTP_TIMER_TYPE_ASCONF:
2172 		/*
2173 		 * Here the timer comes from the stcb but its value is from
2174 		 * the net's RTO.
2175 		 */
2176 		if ((stcb == NULL) || (net == NULL)) {
2177 			return;
2178 		}
2179 		if (net->RTO == 0) {
2180 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2181 		} else {
2182 			to_ticks = MSEC_TO_TICKS(net->RTO);
2183 		}
2184 		tmr = &stcb->asoc.asconf_timer;
2185 		break;
2186 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2187 		if ((stcb == NULL) || (net != NULL)) {
2188 			return;
2189 		}
2190 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2191 		tmr = &stcb->asoc.delete_prim_timer;
2192 		break;
2193 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2194 		if (stcb == NULL) {
2195 			return;
2196 		}
2197 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2198 			/*
2199 			 * Really an error since stcb is NOT set to
2200 			 * autoclose
2201 			 */
2202 			return;
2203 		}
2204 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2205 		tmr = &stcb->asoc.autoclose_timer;
2206 		break;
2207 	default:
2208 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2209 		    __FUNCTION__, t_type);
2210 		return;
2211 		break;
2212 	};
2213 	if ((to_ticks <= 0) || (tmr == NULL)) {
2214 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2215 		    __FUNCTION__, t_type, to_ticks, tmr);
2216 		return;
2217 	}
2218 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2219 		/*
2220 		 * we do NOT allow you to have it already running. if it is
2221 		 * we leave the current one up unchanged
2222 		 */
2223 		return;
2224 	}
2225 	/* At this point we can proceed */
2226 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2227 		stcb->asoc.num_send_timers_up++;
2228 	}
2229 	tmr->stopped_from = 0;
2230 	tmr->type = t_type;
2231 	tmr->ep = (void *)inp;
2232 	tmr->tcb = (void *)stcb;
2233 	tmr->net = (void *)net;
2234 	tmr->self = (void *)tmr;
2235 	tmr->ticks = sctp_get_tick_count();
2236 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2237 	return;
2238 }
2239 
/*
 * Stop (cancel) the timer of type 't_type' backed by the given
 * inp/stcb/net combination.  Which arguments must be non-NULL depends on
 * the timer type; calls lacking the required objects are silently
 * ignored.  'from' is recorded in the timer for debugging, identifying
 * the code path that requested the stop.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Every type except ADDR_WQ requires an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the sctp_timer structure that backs it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			struct sctp_iterator *it;

			/* for the iterator, 'inp' actually carries the
			 * iterator pointer */
			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the pending-SEND-timer count in sync (clamped at zero). */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2411 
2412 #ifdef SCTP_USE_ADLER32
2413 static uint32_t
2414 update_adler32(uint32_t adler, uint8_t * buf, int32_t len)
2415 {
2416 	uint32_t s1 = adler & 0xffff;
2417 	uint32_t s2 = (adler >> 16) & 0xffff;
2418 	int n;
2419 
2420 	for (n = 0; n < len; n++, buf++) {
2421 		/* s1 = (s1 + buf[n]) % BASE */
2422 		/* first we add */
2423 		s1 = (s1 + *buf);
2424 		/*
2425 		 * now if we need to, we do a mod by subtracting. It seems a
2426 		 * bit faster since I really will only ever do one subtract
2427 		 * at the MOST, since buf[n] is a max of 255.
2428 		 */
2429 		if (s1 >= SCTP_ADLER32_BASE) {
2430 			s1 -= SCTP_ADLER32_BASE;
2431 		}
2432 		/* s2 = (s2 + s1) % BASE */
2433 		/* first we add */
2434 		s2 = (s2 + s1);
2435 		/*
2436 		 * again, it is more efficent (it seems) to subtract since
2437 		 * the most s2 will ever be is (BASE-1 + BASE-1) in the
2438 		 * worse case. This would then be (2 * BASE) - 2, which will
2439 		 * still only do one subtract. On Intel this is much better
2440 		 * to do this way and avoid the divide. Have not -pg'd on
2441 		 * sparc.
2442 		 */
2443 		if (s2 >= SCTP_ADLER32_BASE) {
2444 			s2 -= SCTP_ADLER32_BASE;
2445 		}
2446 	}
2447 	/* Return the adler32 of the bytes buf[0..len-1] */
2448 	return ((s2 << 16) + s1);
2449 }
2450 
2451 #endif
2452 
2453 
2454 uint32_t
2455 sctp_calculate_len(struct mbuf *m)
2456 {
2457 	uint32_t tlen = 0;
2458 	struct mbuf *at;
2459 
2460 	at = m;
2461 	while (at) {
2462 		tlen += SCTP_BUF_LEN(at);
2463 		at = SCTP_BUF_NEXT(at);
2464 	}
2465 	return (tlen);
2466 }
2467 
2468 #if defined(SCTP_WITH_NO_CSUM)
2469 
2470 uint32_t
2471 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2472 {
2473 	/*
2474 	 * given a mbuf chain with a packetheader offset by 'offset'
2475 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2476 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2477 	 * has a side bonus as it will calculate the total length of the
2478 	 * mbuf chain. Note: if offset is greater than the total mbuf
2479 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2480 	 */
2481 	if (pktlen == NULL)
2482 		return (0);
2483 	*pktlen = sctp_calculate_len(m);
2484 	return (0);
2485 }
2486 
2487 #elif defined(SCTP_USE_INCHKSUM)
2488 
2489 #include <machine/in_cksum.h>
2490 
2491 uint32_t
2492 sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
2493 {
2494 	/*
2495 	 * given a mbuf chain with a packetheader offset by 'offset'
2496 	 * pointing at a sctphdr (with csum set to 0) go through the chain
2497 	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
2498 	 * has a side bonus as it will calculate the total length of the
2499 	 * mbuf chain. Note: if offset is greater than the total mbuf
2500 	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
2501 	 */
2502 	int32_t tlen = 0;
2503 	struct mbuf *at;
2504 	uint32_t the_sum, retsum;
2505 
2506 	at = m;
2507 	while (at) {
2508 		tlen += SCTP_BUF_LEN(at);
2509 		at = SCTP_BUF_NEXT(at);
2510 	}
2511 	the_sum = (uint32_t) (in_cksum_skip(m, tlen, offset));
2512 	if (pktlen != NULL)
2513 		*pktlen = (tlen - offset);
2514 	retsum = htons(the_sum);
2515 	return (the_sum);
2516 }
2517 
2518 #else
2519 
uint32_t
sctp_calculate_sum(struct mbuf *m, int32_t * pktlen, uint32_t offset)
{
	/*
	 * given a mbuf chain with a packetheader offset by 'offset'
	 * pointing at a sctphdr (with csum set to 0) go through the chain
	 * of SCTP_BUF_NEXT()'s and calculate the SCTP checksum. This also
	 * has a side bonus as it will calculate the total length of the
	 * mbuf chain. Note: if offset is greater than the total mbuf
	 * length, checksum=1, pktlen=0 is returned (ie. no real error code)
	 */
	int32_t tlen = 0;

#ifdef SCTP_USE_ADLER32
	/* Adler-32 runs with an initial accumulator of 1. */
	uint32_t base = 1L;

#else
	/* CRC-32c starts from all-ones. */
	uint32_t base = 0xffffffff;

#endif
	struct mbuf *at;

	at = m;
	/* find the correct mbuf and offset into mbuf */
	while ((at != NULL) && (offset > (uint32_t) SCTP_BUF_LEN(at))) {
		offset -= SCTP_BUF_LEN(at);	/* update remaining offset
						 * left */
		at = SCTP_BUF_NEXT(at);
	}
	/* Fold each mbuf's data (past 'offset' in the first one) in. */
	while (at != NULL) {
		if ((SCTP_BUF_LEN(at) - offset) > 0) {
#ifdef SCTP_USE_ADLER32
			base = update_adler32(base,
			    (unsigned char *)(SCTP_BUF_AT(at, offset)),
			    (unsigned int)(SCTP_BUF_LEN(at) - offset));
#else
			if ((SCTP_BUF_LEN(at) - offset) < 4) {
				/* Use old method if less than 4 bytes */
				base = old_update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			} else {
				base = update_crc32(base,
				    (unsigned char *)(SCTP_BUF_AT(at, offset)),
				    (unsigned int)(SCTP_BUF_LEN(at) - offset));
			}
#endif
			tlen += SCTP_BUF_LEN(at) - offset;
			/* we only offset once into the first mbuf */
		}
		if (offset) {
			/* Consume the remaining offset; zero once spent. */
			if (offset < (uint32_t) SCTP_BUF_LEN(at))
				offset = 0;
			else
				offset -= SCTP_BUF_LEN(at);
		}
		at = SCTP_BUF_NEXT(at);
	}
	if (pktlen != NULL) {
		*pktlen = tlen;
	}
#ifdef SCTP_USE_ADLER32
	/* Adler32 */
	base = htonl(base);
#else
	/* CRC-32c */
	base = sctp_csum_finalize(base);
#endif
	return (base);
}
2590 
2591 
2592 #endif
2593 
void
sctp_mtu_size_reset(struct sctp_inpcb *inp,
    struct sctp_association *asoc, uint32_t mtu)
{
	/*
	 * Reset the P-MTU size on this association, this involves changing
	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
	 * allow the DF flag to be cleared.
	 */
	struct sctp_tmit_chunk *chk;
	unsigned int eff_mtu, ovh;

#ifdef SCTP_PRINT_FOR_B_AND_M
	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
	    inp, asoc, mtu);
#endif
	asoc->smallest_mtu = mtu;
	/* Per-packet overhead depends on the bound address family. */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ovh = SCTP_MIN_OVERHEAD;
	} else {
		ovh = SCTP_MIN_V4_OVERHEAD;
	}
	eff_mtu = mtu - ovh;
	/* Chunks still waiting to be sent... */
	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {

		if (chk->send_size > eff_mtu) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
	/* ...and chunks already sent (candidates for retransmission). */
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->send_size > eff_mtu) {
			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
		}
	}
}
2629 
2630 
2631 /*
2632  * given an association and starting time of the current RTT period return
2633  * RTO in number of msecs net should point to the current network
2634  */
/*
 * Compute and return the new RTO (in ms) for 'net' from the RTT sample
 * that started at *told, updating the smoothed RTT (lastsa) and RTT
 * variance (lastsv) state on 'net' as a side effect.  'safe' selects
 * whether *told must be copied to aligned storage first (sparc64).
 * Returns 0 on a bad 'safe' argument.
 */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value (elapsed time, in whole ms) */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		/* calc_time becomes the error term (sample - srtt/8). */
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			/* never let the variance collapse to zero */
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = srtt + rttvar (integer form of RFC 4960 6.3.1). */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	/* Latch satellite-network mode once a large RTO is observed. */
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2760 
2761 /*
2762  * return a pointer to a contiguous piece of data from the given mbuf chain
2763  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2764  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2765  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2766  */
2767 caddr_t
2768 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2769 {
2770 	uint32_t count;
2771 	uint8_t *ptr;
2772 
2773 	ptr = in_ptr;
2774 	if ((off < 0) || (len <= 0))
2775 		return (NULL);
2776 
2777 	/* find the desired start location */
2778 	while ((m != NULL) && (off > 0)) {
2779 		if (off < SCTP_BUF_LEN(m))
2780 			break;
2781 		off -= SCTP_BUF_LEN(m);
2782 		m = SCTP_BUF_NEXT(m);
2783 	}
2784 	if (m == NULL)
2785 		return (NULL);
2786 
2787 	/* is the current mbuf large enough (eg. contiguous)? */
2788 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2789 		return (mtod(m, caddr_t)+off);
2790 	} else {
2791 		/* else, it spans more than one mbuf, so save a temp copy... */
2792 		while ((m != NULL) && (len > 0)) {
2793 			count = min(SCTP_BUF_LEN(m) - off, len);
2794 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2795 			len -= count;
2796 			ptr += count;
2797 			off = 0;
2798 			m = SCTP_BUF_NEXT(m);
2799 		}
2800 		if ((m == NULL) && (len > 0))
2801 			return (NULL);
2802 		else
2803 			return ((caddr_t)in_ptr);
2804 	}
2805 }
2806 
2807 
2808 
2809 struct sctp_paramhdr *
2810 sctp_get_next_param(struct mbuf *m,
2811     int offset,
2812     struct sctp_paramhdr *pull,
2813     int pull_limit)
2814 {
2815 	/* This just provides a typed signature to Peter's Pull routine */
2816 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2817 	    (uint8_t *) pull));
2818 }
2819 
2820 
2821 int
2822 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2823 {
2824 	/*
2825 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2826 	 * padlen is > 3 this routine will fail.
2827 	 */
2828 	uint8_t *dp;
2829 	int i;
2830 
2831 	if (padlen > 3) {
2832 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2833 		return (ENOBUFS);
2834 	}
2835 	if (padlen <= M_TRAILINGSPACE(m)) {
2836 		/*
2837 		 * The easy way. We hope the majority of the time we hit
2838 		 * here :)
2839 		 */
2840 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2841 		SCTP_BUF_LEN(m) += padlen;
2842 	} else {
2843 		/* Hard way we must grow the mbuf */
2844 		struct mbuf *tmp;
2845 
2846 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2847 		if (tmp == NULL) {
2848 			/* Out of space GAK! we are in big trouble. */
2849 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2850 			return (ENOSPC);
2851 		}
2852 		/* setup and insert in middle */
2853 		SCTP_BUF_LEN(tmp) = padlen;
2854 		SCTP_BUF_NEXT(tmp) = NULL;
2855 		SCTP_BUF_NEXT(m) = tmp;
2856 		dp = mtod(tmp, uint8_t *);
2857 	}
2858 	/* zero out the pad */
2859 	for (i = 0; i < padlen; i++) {
2860 		*dp = 0;
2861 		dp++;
2862 	}
2863 	return (0);
2864 }
2865 
2866 int
2867 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2868 {
2869 	/* find the last mbuf in chain and pad it */
2870 	struct mbuf *m_at;
2871 
2872 	m_at = m;
2873 	if (last_mbuf) {
2874 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2875 	} else {
2876 		while (m_at) {
2877 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2878 				return (sctp_add_pad_tombuf(m_at, padval));
2879 			}
2880 			m_at = SCTP_BUF_NEXT(m_at);
2881 		}
2882 	}
2883 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2884 	return (EFAULT);
2885 }
2886 
/* Counter bumped each time an assoc-change event wakes socket sleepers. */
int sctp_asoc_change_wake = 0;
2888 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification ('event', with 'error') for
 * 'stcb' to the owning socket's receive queue.  For TCP-model/connected
 * sockets a COMM_LOST or CANT_STR_ASSOC additionally sets so_error
 * (ECONNREFUSED while still in COOKIE_WAIT, ECONNRESET otherwise) and
 * wakes any sleepers.  'so_locked' tells the Apple/lock-testing builds
 * whether the socket lock is already held by the caller.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * First if we are are going down dump everything we can to the
	 * socket rcv queue.
	 */

	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
	    ) {
		/* If the socket is gone we are out of here */
		return;
	}
	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Take the socket lock in the required order:
			 * hold a refcount, drop the TCB lock, lock the
			 * socket, then re-take the TCB lock.  Re-check
			 * that the socket did not close in the window.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	/* Build the notification mbuf. */
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* Wrap it in a read-queue entry and hand it to the socket. */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Same lock-ordering dance as above. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3018 
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification on the association's socket.
 * 'state' is the new address state (e.g. SCTP_ADDR_UNREACHABLE), 'sa' the
 * affected peer address, 'error' the associated error code.  Silently does
 * nothing if the event is not enabled or memory cannot be obtained.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address; spc_aaddr is a sockaddr_storage, large enough. */
	switch (sa->sa_family) {
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			/*
			 * Normalize link-local scope for the user: either
			 * recover a missing sin6_scope_id or strip the
			 * kernel-embedded scope from the address bytes.
			 */
			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3091 
3092 
/*
 * Queue an SCTP_SEND_FAILED notification for a chunk ('chk') that was
 * sent or left unsent ('error' selects SCTP_DATA_SENT/UNSENT).  On
 * success the chunk's data mbuf chain is stolen (chk->data set to NULL)
 * and chained behind the notification so the user gets the payload back.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
		/* event not enabled */
		return;

	/* ssf_length covers the header plus the returned payload. */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* Chain the original payload behind the notification header. */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* Frees the whole chain, including the stolen payload. */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3161 
3162 
/*
 * Like sctp_notify_send_failed(), but for a message still on the stream
 * queue (struct sctp_stream_queue_pending) rather than a transmit chunk.
 * Steals sp->data into the notification chain on success.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)))
		/* event not enabled */
		return;

	/* ssf_length covers the header plus the returned payload. */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	ssf->ssf_info.sinfo_flags = sp->sinfo_flags;
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* Chain the original payload behind the notification header. */
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* Frees the whole chain, including the stolen payload. */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3231 
3232 
3233 
3234 static void
3235 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3236     uint32_t error)
3237 {
3238 	struct mbuf *m_notify;
3239 	struct sctp_adaptation_event *sai;
3240 	struct sctp_queued_to_read *control;
3241 
3242 	if ((stcb == NULL) || (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)))
3243 		/* event not enabled */
3244 		return;
3245 
3246 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3247 	if (m_notify == NULL)
3248 		/* no space left */
3249 		return;
3250 	SCTP_BUF_LEN(m_notify) = 0;
3251 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3252 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3253 	sai->sai_flags = 0;
3254 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3255 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3256 	sai->sai_assoc_id = sctp_get_associd(stcb);
3257 
3258 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3259 	SCTP_BUF_NEXT(m_notify) = NULL;
3260 
3261 	/* append to socket */
3262 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3263 	    0, 0, 0, 0, 0, 0,
3264 	    m_notify);
3265 	if (control == NULL) {
3266 		/* no memory */
3267 		sctp_m_freem(m_notify);
3268 		return;
3269 	}
3270 	control->length = SCTP_BUF_LEN(m_notify);
3271 	control->spec_flags = M_NOTIFICATION;
3272 	/* not that we need this */
3273 	control->tail_mbuf = m_notify;
3274 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3275 	    control,
3276 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3277 }
3278 
3279 /* This always must be called with the read-queue LOCKED in the INP */
3280 void
3281 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3282     int nolock, uint32_t val)
3283 {
3284 	struct mbuf *m_notify;
3285 	struct sctp_pdapi_event *pdapi;
3286 	struct sctp_queued_to_read *control;
3287 	struct sockbuf *sb;
3288 
3289 	if ((stcb == NULL) || (stcb->sctp_socket == NULL) ||
3290 	    sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT))
3291 		/* event not enabled */
3292 		return;
3293 
3294 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3295 	if (m_notify == NULL)
3296 		/* no space left */
3297 		return;
3298 	SCTP_BUF_LEN(m_notify) = 0;
3299 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3300 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3301 	pdapi->pdapi_flags = 0;
3302 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3303 	pdapi->pdapi_indication = error;
3304 	pdapi->pdapi_stream = (val >> 16);
3305 	pdapi->pdapi_seq = (val & 0x0000ffff);
3306 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3307 
3308 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3309 	SCTP_BUF_NEXT(m_notify) = NULL;
3310 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3311 	    0, 0, 0, 0, 0, 0,
3312 	    m_notify);
3313 	if (control == NULL) {
3314 		/* no memory */
3315 		sctp_m_freem(m_notify);
3316 		return;
3317 	}
3318 	control->spec_flags = M_NOTIFICATION;
3319 	control->length = SCTP_BUF_LEN(m_notify);
3320 	/* not that we need this */
3321 	control->tail_mbuf = m_notify;
3322 	control->held_length = 0;
3323 	control->length = 0;
3324 	if (nolock == 0) {
3325 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
3326 	}
3327 	sb = &stcb->sctp_socket->so_rcv;
3328 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3329 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3330 	}
3331 	sctp_sballoc(stcb, sb, m_notify);
3332 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3333 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3334 	}
3335 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3336 	control->end_added = 1;
3337 	if (stcb->asoc.control_pdapi)
3338 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3339 	else {
3340 		/* we really should not see this case */
3341 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3342 	}
3343 	if (nolock == 0) {
3344 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
3345 	}
3346 	if (stcb->sctp_ep && stcb->sctp_socket) {
3347 		/* This should always be the case */
3348 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3349 	}
3350 }
3351 
/*
 * Handle the socket-level consequences of a completed SHUTDOWN: for
 * TCP-model (or connected one-to-one pool) sockets mark the socket
 * unreadable/unwritable and wake any sleepers, then (if enabled) queue
 * an SCTP_SHUTDOWN_EVENT notification.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock-order dance: drop the TCB lock, take the socket
		 * lock, retake the TCB lock.  The refcnt bump keeps the
		 * TCB alive across the window; bail if the socket closed
		 * in the meantime.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
		socantrcvmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
		/* event not enabled */
		return;

	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3423 
/*
 * Queue an SCTP_STREAM_RESET_EVENT notification listing the affected
 * streams.  'number_entries' of 0 means all streams; 'flag' carries the
 * direction/status bits (SCTP_STRRESET_*).  'list' entries arrive in
 * network byte order and are converted for the user.
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if (stcb == NULL) {
		return;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
		/* event not enabled */
		return;

	/* Over-allocate (a cluster) since the list length is variable. */
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	if (number_entries == 0) {
		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
	} else {
		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
	}
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		/* Convert each stream id to host byte order for the user. */
		for (i = 0; i < number_entries; i++) {
			strreset->strreset_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3491 
3492 
3493 void
3494 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3495     uint32_t error, void *data, int so_locked
3496 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3497     SCTP_UNUSED
3498 #endif
3499 )
3500 {
3501 	if (stcb == NULL) {
3502 		/* unlikely but */
3503 		return;
3504 	}
3505 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3506 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3507 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)
3508 	    ) {
3509 		/* No notifications up when we are in a no socket state */
3510 		return;
3511 	}
3512 	if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3513 		/* Can't send up to a closed socket any notifications */
3514 		return;
3515 	}
3516 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3517 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3518 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3519 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3520 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3521 			/* Don't report these in front states */
3522 			return;
3523 		}
3524 	}
3525 	switch (notification) {
3526 	case SCTP_NOTIFY_ASSOC_UP:
3527 		if (stcb->asoc.assoc_up_sent == 0) {
3528 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3529 			stcb->asoc.assoc_up_sent = 1;
3530 		}
3531 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3532 			sctp_notify_adaptation_layer(stcb, error);
3533 		}
3534 		break;
3535 	case SCTP_NOTIFY_ASSOC_DOWN:
3536 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3537 		break;
3538 	case SCTP_NOTIFY_INTERFACE_DOWN:
3539 		{
3540 			struct sctp_nets *net;
3541 
3542 			net = (struct sctp_nets *)data;
3543 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3544 			    (struct sockaddr *)&net->ro._l_addr, error);
3545 			break;
3546 		}
3547 	case SCTP_NOTIFY_INTERFACE_UP:
3548 		{
3549 			struct sctp_nets *net;
3550 
3551 			net = (struct sctp_nets *)data;
3552 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3553 			    (struct sockaddr *)&net->ro._l_addr, error);
3554 			break;
3555 		}
3556 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3557 		{
3558 			struct sctp_nets *net;
3559 
3560 			net = (struct sctp_nets *)data;
3561 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3562 			    (struct sockaddr *)&net->ro._l_addr, error);
3563 			break;
3564 		}
3565 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3566 		sctp_notify_send_failed2(stcb, error,
3567 		    (struct sctp_stream_queue_pending *)data, so_locked);
3568 		break;
3569 	case SCTP_NOTIFY_DG_FAIL:
3570 		sctp_notify_send_failed(stcb, error,
3571 		    (struct sctp_tmit_chunk *)data, so_locked);
3572 		break;
3573 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3574 		{
3575 			uint32_t val;
3576 
3577 			val = *((uint32_t *) data);
3578 
3579 			sctp_notify_partial_delivery_indication(stcb, error, 0, val);
3580 		}
3581 		break;
3582 	case SCTP_NOTIFY_STRDATA_ERR:
3583 		break;
3584 	case SCTP_NOTIFY_ASSOC_ABORTED:
3585 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3586 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3587 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3588 		} else {
3589 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3590 		}
3591 		break;
3592 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3593 		break;
3594 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3595 		break;
3596 	case SCTP_NOTIFY_ASSOC_RESTART:
3597 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3598 		break;
3599 	case SCTP_NOTIFY_HB_RESP:
3600 		break;
3601 	case SCTP_NOTIFY_STR_RESET_SEND:
3602 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3603 		break;
3604 	case SCTP_NOTIFY_STR_RESET_RECV:
3605 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3606 		break;
3607 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3608 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3609 		break;
3610 
3611 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3612 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3613 		break;
3614 
3615 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3616 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3617 		    error);
3618 		break;
3619 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3620 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3621 		    error);
3622 		break;
3623 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3624 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3625 		    error);
3626 		break;
3627 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3628 		break;
3629 	case SCTP_NOTIFY_ASCONF_FAILED:
3630 		break;
3631 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3632 		sctp_notify_shutdown_event(stcb);
3633 		break;
3634 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3635 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3636 		    (uint16_t) (uintptr_t) data);
3637 		break;
3638 #if 0
3639 	case SCTP_NOTIFY_AUTH_KEY_CONFLICT:
3640 		sctp_notify_authentication(stcb, SCTP_AUTH_KEY_CONFLICT,
3641 		    error, (uint16_t) (uintptr_t) data);
3642 		break;
3643 #endif				/* not yet? remove? */
3644 
3645 
3646 	default:
3647 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3648 		    __FUNCTION__, notification, notification);
3649 		break;
3650 	}			/* end switch */
3651 }
3652 
3653 void
3654 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3655 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3656     SCTP_UNUSED
3657 #endif
3658 )
3659 {
3660 	struct sctp_association *asoc;
3661 	struct sctp_stream_out *outs;
3662 	struct sctp_tmit_chunk *chk;
3663 	struct sctp_stream_queue_pending *sp;
3664 	int i;
3665 
3666 	asoc = &stcb->asoc;
3667 
3668 	if (stcb == NULL) {
3669 		return;
3670 	}
3671 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3672 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3673 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3674 		return;
3675 	}
3676 	/* now through all the gunk freeing chunks */
3677 	if (holds_lock == 0) {
3678 		SCTP_TCB_SEND_LOCK(stcb);
3679 	}
3680 	/* sent queue SHOULD be empty */
3681 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3682 		chk = TAILQ_FIRST(&asoc->sent_queue);
3683 		while (chk) {
3684 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3685 			asoc->sent_queue_cnt--;
3686 			if (chk->data) {
3687 				/*
3688 				 * trim off the sctp chunk header(it should
3689 				 * be there)
3690 				 */
3691 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3692 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3693 					sctp_mbuf_crush(chk->data);
3694 					chk->send_size -= sizeof(struct sctp_data_chunk);
3695 				}
3696 			}
3697 			sctp_free_bufspace(stcb, asoc, chk, 1);
3698 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3699 			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3700 			if (chk->data) {
3701 				sctp_m_freem(chk->data);
3702 				chk->data = NULL;
3703 			}
3704 			sctp_free_a_chunk(stcb, chk);
3705 			/* sa_ignore FREED_MEMORY */
3706 			chk = TAILQ_FIRST(&asoc->sent_queue);
3707 		}
3708 	}
3709 	/* pending send queue SHOULD be empty */
3710 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3711 		chk = TAILQ_FIRST(&asoc->send_queue);
3712 		while (chk) {
3713 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3714 			asoc->send_queue_cnt--;
3715 			if (chk->data) {
3716 				/*
3717 				 * trim off the sctp chunk header(it should
3718 				 * be there)
3719 				 */
3720 				if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3721 					m_adj(chk->data, sizeof(struct sctp_data_chunk));
3722 					sctp_mbuf_crush(chk->data);
3723 					chk->send_size -= sizeof(struct sctp_data_chunk);
3724 				}
3725 			}
3726 			sctp_free_bufspace(stcb, asoc, chk, 1);
3727 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3728 			if (chk->data) {
3729 				sctp_m_freem(chk->data);
3730 				chk->data = NULL;
3731 			}
3732 			sctp_free_a_chunk(stcb, chk);
3733 			/* sa_ignore FREED_MEMORY */
3734 			chk = TAILQ_FIRST(&asoc->send_queue);
3735 		}
3736 	}
3737 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3738 		/* For each stream */
3739 		outs = &stcb->asoc.strmout[i];
3740 		/* clean up any sends there */
3741 		stcb->asoc.locked_on_sending = NULL;
3742 		sp = TAILQ_FIRST(&outs->outqueue);
3743 		while (sp) {
3744 			stcb->asoc.stream_queue_cnt--;
3745 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3746 			sctp_free_spbufspace(stcb, asoc, sp);
3747 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3748 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3749 			if (sp->data) {
3750 				sctp_m_freem(sp->data);
3751 				sp->data = NULL;
3752 			}
3753 			if (sp->net)
3754 				sctp_free_remote_addr(sp->net);
3755 			sp->net = NULL;
3756 			/* Free the chunk */
3757 			sctp_free_a_strmoq(stcb, sp);
3758 			/* sa_ignore FREED_MEMORY */
3759 			sp = TAILQ_FIRST(&outs->outqueue);
3760 		}
3761 	}
3762 
3763 	if (holds_lock == 0) {
3764 		SCTP_TCB_SEND_UNLOCK(stcb);
3765 	}
3766 }
3767 
3768 void
3769 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3770 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3771     SCTP_UNUSED
3772 #endif
3773 )
3774 {
3775 
3776 	if (stcb == NULL) {
3777 		return;
3778 	}
3779 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3780 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3781 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3782 		return;
3783 	}
3784 	/* Tell them we lost the asoc */
3785 	sctp_report_all_outbound(stcb, 1, so_locked);
3786 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3787 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3788 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3789 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3790 	}
3791 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3792 }
3793 
/*
 * Abort an association in response to an inbound packet: notify the
 * user (if a TCB exists), send an ABORT to the peer using the peer's
 * vtag, and free the TCB.  With no TCB, an out-of-the-blue ABORT is
 * sent with vtag 0 and a dying endpoint may be reaped.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: drop the TCB lock, take the socket
		 * lock, retake the TCB lock; the refcnt bump keeps the
		 * TCB alive across the unlocked window.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		/* No TCB: reap a dying endpoint with no associations left. */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3839 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Dump the in/out TSN tracking rings for debugging.  Each ring holds
 * SCTP_TSN_LOG_SIZE entries; when 'wrapped' is set the valid entries
 * continue from the current index to the end before restarting at 0.
 *
 * NOTE(review): the guard below is spelled "NOSIY_PRINTS" — presumably
 * a misspelling of "NOISY_PRINTS".  As written, the body compiles out
 * unless NOSIY_PRINTS (with this exact spelling) is defined — confirm
 * which name the build actually defines before relying on this output.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* Wrapped ring: print the older half first (index .. end). */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3903 
/*
 * Locally abort an association: notify the user (unless the socket is
 * gone), send an ABORT chunk (with optional cause 'op_err') to the
 * peer, update statistics, and free the TCB.  With a NULL TCB only a
 * dying endpoint may be reaped.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* Only established/shutdown-received assocs count as current. */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: drop the TCB lock, take the socket lock,
	 * retake the TCB lock; the refcnt bump keeps the TCB alive
	 * across the unlocked window.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3969 
3970 void
3971 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3972     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3973 {
3974 	struct sctp_chunkhdr *ch, chunk_buf;
3975 	unsigned int chk_length;
3976 
3977 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3978 	/* Generate a TO address for future reference */
3979 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3980 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3981 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3982 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3983 		}
3984 	}
3985 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3986 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3987 	while (ch != NULL) {
3988 		chk_length = ntohs(ch->chunk_length);
3989 		if (chk_length < sizeof(*ch)) {
3990 			/* break to abort land */
3991 			break;
3992 		}
3993 		switch (ch->chunk_type) {
3994 		case SCTP_COOKIE_ECHO:
3995 			/* We hit here only if the assoc is being freed */
3996 			return;
3997 		case SCTP_PACKET_DROPPED:
3998 			/* we don't respond to pkt-dropped */
3999 			return;
4000 		case SCTP_ABORT_ASSOCIATION:
4001 			/* we don't respond with an ABORT to an ABORT */
4002 			return;
4003 		case SCTP_SHUTDOWN_COMPLETE:
4004 			/*
4005 			 * we ignore it since we are not waiting for it and
4006 			 * peer is gone
4007 			 */
4008 			return;
4009 		case SCTP_SHUTDOWN_ACK:
4010 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
4011 			return;
4012 		default:
4013 			break;
4014 		}
4015 		offset += SCTP_SIZE32(chk_length);
4016 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4017 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4018 	}
4019 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
4020 }
4021 
4022 /*
4023  * check the inbound datagram to make sure there is not an abort inside it,
4024  * if there is return 1, else return 0.
4025  */
4026 int
4027 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4028 {
4029 	struct sctp_chunkhdr *ch;
4030 	struct sctp_init_chunk *init_chk, chunk_buf;
4031 	int offset;
4032 	unsigned int chk_length;
4033 
4034 	offset = iphlen + sizeof(struct sctphdr);
4035 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4036 	    (uint8_t *) & chunk_buf);
4037 	while (ch != NULL) {
4038 		chk_length = ntohs(ch->chunk_length);
4039 		if (chk_length < sizeof(*ch)) {
4040 			/* packet is probably corrupt */
4041 			break;
4042 		}
4043 		/* we seem to be ok, is it an abort? */
4044 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4045 			/* yep, tell them */
4046 			return (1);
4047 		}
4048 		if (ch->chunk_type == SCTP_INITIATION) {
4049 			/* need to update the Vtag */
4050 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4051 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4052 			if (init_chk != NULL) {
4053 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4054 			}
4055 		}
4056 		/* Nope, move to the next chunk */
4057 		offset += SCTP_SIZE32(chk_length);
4058 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4059 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4060 	}
4061 	return (0);
4062 }
4063 
4064 /*
4065  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4066  * set (i.e. it's 0) so, create this function to compare link local scopes
4067  */
4068 #ifdef INET6
4069 uint32_t
4070 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4071 {
4072 	struct sockaddr_in6 a, b;
4073 
4074 	/* save copies */
4075 	a = *addr1;
4076 	b = *addr2;
4077 
4078 	if (a.sin6_scope_id == 0)
4079 		if (sa6_recoverscope(&a)) {
4080 			/* can't get scope, so can't match */
4081 			return (0);
4082 		}
4083 	if (b.sin6_scope_id == 0)
4084 		if (sa6_recoverscope(&b)) {
4085 			/* can't get scope, so can't match */
4086 			return (0);
4087 		}
4088 	if (a.sin6_scope_id != b.sin6_scope_id)
4089 		return (0);
4090 
4091 	return (1);
4092 }
4093 
4094 /*
4095  * returns a sockaddr_in6 with embedded scope recovered and removed
4096  */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/*
	 * For a link-local IPv6 address, return a sockaddr with a usable
	 * sin6_scope_id: if addr has no scope id, try to recover it into
	 * *store and return store; otherwise strip the embedded scope.
	 * Non-INET6 and non-link-local addresses pass through unchanged.
	 */
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/*
				 * else, return the original "to" addr.
				 * NOTE(review): in6_clearscope() writes
				 * through the caller's addr here, so this
				 * path is not read-only -- confirm all
				 * callers tolerate that mutation.
				 */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
4117 
4118 #endif
4119 
4120 /*
4121  * are the two addresses the same?  currently a "scopeless" check returns: 1
4122  * if same, 0 if not
4123  */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{
	/*
	 * Scopeless address equality test: returns 1 when the two
	 * sockaddrs carry the same address, 0 when they differ, are of
	 * different (or unsupported) families, or are NULL.
	 */
	if ((sa1 == NULL) || (sa2 == NULL)) {
		/* must be valid */
		return (0);
	}
	if (sa1->sa_family != sa2->sa_family) {
		/* must be the same family */
		return (0);
	}
	if (sa1->sa_family == AF_INET) {
		/* IPv4 addresses */
		struct sockaddr_in *a = (struct sockaddr_in *)sa1;
		struct sockaddr_in *b = (struct sockaddr_in *)sa2;

		return (a->sin_addr.s_addr == b->sin_addr.s_addr);
	}
#ifdef INET6
	if (sa1->sa_family == AF_INET6) {
		/* IPv6 addresses */
		return (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)sa1,
		    (struct sockaddr_in6 *)sa2));
	}
#endif
	/* we don't do these... */
	return (0);
}
4163 
4164 void
4165 sctp_print_address(struct sockaddr *sa)
4166 {
4167 #ifdef INET6
4168 	char ip6buf[INET6_ADDRSTRLEN];
4169 
4170 	ip6buf[0] = 0;
4171 #endif
4172 
4173 	switch (sa->sa_family) {
4174 #ifdef INET6
4175 	case AF_INET6:
4176 		{
4177 			struct sockaddr_in6 *sin6;
4178 
4179 			sin6 = (struct sockaddr_in6 *)sa;
4180 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4181 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4182 			    ntohs(sin6->sin6_port),
4183 			    sin6->sin6_scope_id);
4184 			break;
4185 		}
4186 #endif
4187 	case AF_INET:
4188 		{
4189 			struct sockaddr_in *sin;
4190 			unsigned char *p;
4191 
4192 			sin = (struct sockaddr_in *)sa;
4193 			p = (unsigned char *)&sin->sin_addr;
4194 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4195 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4196 			break;
4197 		}
4198 	default:
4199 		SCTP_PRINTF("?\n");
4200 		break;
4201 	}
4202 }
4203 
4204 void
4205 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4206 {
4207 	switch (iph->ip_v) {
4208 		case IPVERSION:
4209 		{
4210 			struct sockaddr_in lsa, fsa;
4211 
4212 			bzero(&lsa, sizeof(lsa));
4213 			lsa.sin_len = sizeof(lsa);
4214 			lsa.sin_family = AF_INET;
4215 			lsa.sin_addr = iph->ip_src;
4216 			lsa.sin_port = sh->src_port;
4217 			bzero(&fsa, sizeof(fsa));
4218 			fsa.sin_len = sizeof(fsa);
4219 			fsa.sin_family = AF_INET;
4220 			fsa.sin_addr = iph->ip_dst;
4221 			fsa.sin_port = sh->dest_port;
4222 			SCTP_PRINTF("src: ");
4223 			sctp_print_address((struct sockaddr *)&lsa);
4224 			SCTP_PRINTF("dest: ");
4225 			sctp_print_address((struct sockaddr *)&fsa);
4226 			break;
4227 		}
4228 #ifdef INET6
4229 	case IPV6_VERSION >> 4:
4230 		{
4231 			struct ip6_hdr *ip6;
4232 			struct sockaddr_in6 lsa6, fsa6;
4233 
4234 			ip6 = (struct ip6_hdr *)iph;
4235 			bzero(&lsa6, sizeof(lsa6));
4236 			lsa6.sin6_len = sizeof(lsa6);
4237 			lsa6.sin6_family = AF_INET6;
4238 			lsa6.sin6_addr = ip6->ip6_src;
4239 			lsa6.sin6_port = sh->src_port;
4240 			bzero(&fsa6, sizeof(fsa6));
4241 			fsa6.sin6_len = sizeof(fsa6);
4242 			fsa6.sin6_family = AF_INET6;
4243 			fsa6.sin6_addr = ip6->ip6_dst;
4244 			fsa6.sin6_port = sh->dest_port;
4245 			SCTP_PRINTF("src: ");
4246 			sctp_print_address((struct sockaddr *)&lsa6);
4247 			SCTP_PRINTF("dest: ");
4248 			sctp_print_address((struct sockaddr *)&fsa6);
4249 			break;
4250 		}
4251 #endif
4252 	default:
4253 		/* TSNH */
4254 		break;
4255 	}
4256 }
4257 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp (used by peeloff /
	 * accept). Socket-buffer accounting is moved along with the data:
	 * each mbuf is uncharged from the old so_rcv and charged to the
	 * new one. waitflags is passed through to sblock().
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* keep concurrent readers off the old socket while we migrate */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for our target stcb */
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* uncharge the data from the old socket buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge the data to the new socket buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4339 
4340 
4341 void
4342 sctp_add_to_readq(struct sctp_inpcb *inp,
4343     struct sctp_tcb *stcb,
4344     struct sctp_queued_to_read *control,
4345     struct sockbuf *sb,
4346     int end,
4347     int so_locked
4348 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4349     SCTP_UNUSED
4350 #endif
4351 )
4352 {
4353 	/*
4354 	 * Here we must place the control on the end of the socket read
4355 	 * queue AND increment sb_cc so that select will work properly on
4356 	 * read.
4357 	 */
4358 	struct mbuf *m, *prev = NULL;
4359 
4360 	if (inp == NULL) {
4361 		/* Gak, TSNH!! */
4362 #ifdef INVARIANTS
4363 		panic("Gak, inp NULL on add_to_readq");
4364 #endif
4365 		return;
4366 	}
4367 	SCTP_INP_READ_LOCK(inp);
4368 	if (!(control->spec_flags & M_NOTIFICATION)) {
4369 		atomic_add_int(&inp->total_recvs, 1);
4370 		if (!control->do_not_ref_stcb) {
4371 			atomic_add_int(&stcb->total_recvs, 1);
4372 		}
4373 	}
4374 	m = control->data;
4375 	control->held_length = 0;
4376 	control->length = 0;
4377 	while (m) {
4378 		if (SCTP_BUF_LEN(m) == 0) {
4379 			/* Skip mbufs with NO length */
4380 			if (prev == NULL) {
4381 				/* First one */
4382 				control->data = sctp_m_free(m);
4383 				m = control->data;
4384 			} else {
4385 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4386 				m = SCTP_BUF_NEXT(prev);
4387 			}
4388 			if (m == NULL) {
4389 				control->tail_mbuf = prev;;
4390 			}
4391 			continue;
4392 		}
4393 		prev = m;
4394 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4395 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4396 		}
4397 		sctp_sballoc(stcb, sb, m);
4398 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4399 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4400 		}
4401 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4402 		m = SCTP_BUF_NEXT(m);
4403 	}
4404 	if (prev != NULL) {
4405 		control->tail_mbuf = prev;
4406 	} else {
4407 		/* Everything got collapsed out?? */
4408 		return;
4409 	}
4410 	if (end) {
4411 		control->end_added = 1;
4412 	}
4413 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4414 	SCTP_INP_READ_UNLOCK(inp);
4415 	if (inp && inp->sctp_socket) {
4416 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4417 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4418 		} else {
4419 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4420 			struct socket *so;
4421 
4422 			so = SCTP_INP_SO(inp);
4423 			if (!so_locked) {
4424 				atomic_add_int(&stcb->asoc.refcnt, 1);
4425 				SCTP_TCB_UNLOCK(stcb);
4426 				SCTP_SOCKET_LOCK(so, 1);
4427 				SCTP_TCB_LOCK(stcb);
4428 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4429 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4430 					SCTP_SOCKET_UNLOCK(so, 1);
4431 					return;
4432 				}
4433 			}
4434 #endif
4435 			sctp_sorwakeup(inp, inp->sctp_socket);
4436 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4437 			if (!so_locked) {
4438 				SCTP_SOCKET_UNLOCK(so, 1);
4439 			}
4440 #endif
4441 		}
4442 	}
4443 }
4444 
4445 
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 *
	 * Returns 0 on success, -1 when the append is rejected (NULL
	 * control, already-complete control, or no data left).
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common error exit: release the read lock and fail */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/*
	 * Prune zero-length mbufs, total the payload bytes and, when a
	 * socket buffer was supplied (PDAPI case), charge each mbuf to it.
	 */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	/* splice the (pruned) chain onto the control block */
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * NOTE(review): stcb may legally be NULL above,
			 * yet this platform path dereferences it --
			 * confirm callers on these platforms always pass
			 * a TCB.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4587 
4588 
4589 
4590 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4591  *************ALTERNATE ROUTING CODE
4592  */
4593 
4594 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4595  *************ALTERNATE ROUTING CODE
4596  */
4597 
4598 struct mbuf *
4599 sctp_generate_invmanparam(int err)
4600 {
4601 	/* Return a MBUF with a invalid mandatory parameter */
4602 	struct mbuf *m;
4603 
4604 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4605 	if (m) {
4606 		struct sctp_paramhdr *ph;
4607 
4608 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4609 		ph = mtod(m, struct sctp_paramhdr *);
4610 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4611 		ph->param_type = htons(err);
4612 	}
4613 	return (m);
4614 }
4615 
4616 #ifdef SCTP_MBCNT_LOGGING
4617 void
4618 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4619     struct sctp_tmit_chunk *tp1, int chk_cnt)
4620 {
4621 	if (tp1->data == NULL) {
4622 		return;
4623 	}
4624 	asoc->chunks_on_out_queue -= chk_cnt;
4625 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4626 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4627 		    asoc->total_output_queue_size,
4628 		    tp1->book_size,
4629 		    0,
4630 		    tp1->mbcnt);
4631 	}
4632 	if (asoc->total_output_queue_size >= tp1->book_size) {
4633 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4634 	} else {
4635 		asoc->total_output_queue_size = 0;
4636 	}
4637 
4638 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4639 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4640 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4641 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4642 		} else {
4643 			stcb->sctp_socket->so_snd.sb_cc = 0;
4644 
4645 		}
4646 	}
4647 }
4648 
4649 #endif
4650 
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    int reason, struct sctpchunk_listhead *queue, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Abandon the PR-SCTP message that chunk tp1 belongs to on the
	 * given queue: mark every fragment SCTP_FORWARD_TSN_SKIP, free
	 * the data, notify the ULP (SCTP_NOTIFY_DG_FAIL with `reason`)
	 * and move send-queue fragments onto the sent queue. Returns the
	 * summed book_size of the chunks released.
	 */
	int ret_sz = 0;
	int notdone;
	uint8_t foundeom = 0;

	do {
		ret_sz += tp1->book_size;
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if (tp1->data) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, SCTP_SO_NOT_LOCKED);
			sctp_m_freem(tp1->data);
			tp1->data = NULL;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/* socket lock must be taken before the TCB lock */
			so = SCTP_INP_SO(stcb->sctp_ep);
			if (!so_locked) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					/*
					 * assoc was freed while we were
					 * unlocked
					 */
					SCTP_SOCKET_UNLOCK(so, 1);
					return (ret_sz);
				}
			}
#endif
			sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
		if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
			stcb->asoc.sent_queue_cnt_removeable--;
		}
		if (queue == &stcb->asoc.send_queue) {
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/* on to the sent queue */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.sent_queue_cnt++;
		}
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed, we are done */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if ((foundeom == 0) && (queue == &stcb->asoc.sent_queue)) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
		/*
		 * recurse through the send_queue too, starting at the
		 * beginning.
		 */
		if (tp1) {
			ret_sz += sctp_release_pr_sctp_chunk(stcb, tp1, reason,
			    &stcb->asoc.send_queue, so_locked);
		} else {
			SCTP_PRINTF("hmm, nothing on the send queue and no EOM?\n");
		}
	}
	return (ret_sz);
}
4747 
4748 /*
4749  * checks to see if the given address, sa, is one that is currently known by
4750  * the kernel note: can't distinguish the same address on multiple interfaces
4751  * and doesn't handle multiple addresses with different zone/scope id's note:
4752  * ifa_ifwithaddr() compares the entire sockaddr struct
4753  */
4754 struct sctp_ifa *
4755 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4756     int holds_lock)
4757 {
4758 	struct sctp_laddr *laddr;
4759 
4760 	if (holds_lock == 0) {
4761 		SCTP_INP_RLOCK(inp);
4762 	}
4763 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4764 		if (laddr->ifa == NULL)
4765 			continue;
4766 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4767 			continue;
4768 		if (addr->sa_family == AF_INET) {
4769 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4770 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4771 				/* found him. */
4772 				if (holds_lock == 0) {
4773 					SCTP_INP_RUNLOCK(inp);
4774 				}
4775 				return (laddr->ifa);
4776 				break;
4777 			}
4778 		}
4779 #ifdef INET6
4780 		if (addr->sa_family == AF_INET6) {
4781 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4782 			    &laddr->ifa->address.sin6)) {
4783 				/* found him. */
4784 				if (holds_lock == 0) {
4785 					SCTP_INP_RUNLOCK(inp);
4786 				}
4787 				return (laddr->ifa);
4788 				break;
4789 			}
4790 		}
4791 #endif
4792 	}
4793 	if (holds_lock == 0) {
4794 		SCTP_INP_RUNLOCK(inp);
4795 	}
4796 	return (NULL);
4797 }
4798 
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	/*
	 * Fold a sockaddr's address bits into a 32-bit value used to pick
	 * an address hash bucket. Unsupported families hash to 0.
	 */
	switch (addr->sa_family) {
	case AF_INET:
		{
			struct sockaddr_in *sin = (struct sockaddr_in *)addr;
			uint32_t v = sin->sin_addr.s_addr;

			/* xor-fold the high half into the low half */
			return (v ^ (v >> 16));
		}
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
			uint32_t v;

			v = (sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3]);
			return (v ^ (v >> 16));
		}
	default:
		return (0);
	}
}
4821 
4822 struct sctp_ifa *
4823 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4824 {
4825 	struct sctp_ifa *sctp_ifap;
4826 	struct sctp_vrf *vrf;
4827 	struct sctp_ifalist *hash_head;
4828 	uint32_t hash_of_addr;
4829 
4830 	if (holds_lock == 0)
4831 		SCTP_IPI_ADDR_RLOCK();
4832 
4833 	vrf = sctp_find_vrf(vrf_id);
4834 	if (vrf == NULL) {
4835 		if (holds_lock == 0)
4836 			SCTP_IPI_ADDR_RUNLOCK();
4837 		return (NULL);
4838 	}
4839 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4840 
4841 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4842 	if (hash_head == NULL) {
4843 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4844 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4845 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4846 		sctp_print_address(addr);
4847 		SCTP_PRINTF("No such bucket for address\n");
4848 		if (holds_lock == 0)
4849 			SCTP_IPI_ADDR_RUNLOCK();
4850 
4851 		return (NULL);
4852 	}
4853 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4854 		if (sctp_ifap == NULL) {
4855 			panic("Huh LIST_FOREACH corrupt");
4856 		}
4857 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4858 			continue;
4859 		if (addr->sa_family == AF_INET) {
4860 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4861 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4862 				/* found him. */
4863 				if (holds_lock == 0)
4864 					SCTP_IPI_ADDR_RUNLOCK();
4865 				return (sctp_ifap);
4866 				break;
4867 			}
4868 		}
4869 #ifdef INET6
4870 		if (addr->sa_family == AF_INET6) {
4871 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4872 			    &sctp_ifap->address.sin6)) {
4873 				/* found him. */
4874 				if (holds_lock == 0)
4875 					SCTP_IPI_ADDR_RUNLOCK();
4876 				return (sctp_ifap);
4877 				break;
4878 			}
4879 		}
4880 #endif
4881 	}
4882 	if (holds_lock == 0)
4883 		SCTP_IPI_ADDR_RUNLOCK();
4884 	return (NULL);
4885 }
4886 
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	/*
	 * Called from the receive path after the user consumed
	 * *freed_so_far bytes. If the receive window has opened by at
	 * least rwnd_req bytes, send a window-update SACK (and flush any
	 * queued output). hold_rlock indicates the caller holds the INP
	 * read lock; it is dropped around the output call and re-taken.
	 */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* hold a reference so the assoc cannot vanish under us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window grew enough: emit a window-update SACK */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb);
		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* re-take the read lock if we dropped it on the caller's behalf */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
4967 
/*
 * sctp_sorecvmsg() - SCTP-specific replacement for soreceive().
 *
 * Copies one (possibly partial) message off the endpoint's read_queue to
 * userland, either through 'uio' (mp == NULL) or by handing back the raw
 * mbuf chain in '*mp'.  Fills 'sinfo' (when filling_sinfo is set) and
 * 'from' (when fromlen/from are supplied), honors MSG_PEEK / MSG_DONTWAIT /
 * MSG_OOB in '*msg_flags' on input and may set MSG_EOR / MSG_NOTIFICATION /
 * MSG_TRUNC on output.  As data is consumed it tracks how much receive
 * window has been freed and calls sctp_user_rcvd() to trigger a
 * window-update SACK once 'rwnd_req' bytes have been released.
 *
 * Returns 0 on success or an errno (EINVAL, EOPNOTSUPP, EFAULT, ENOTCONN,
 * ECONNRESET, EWOULDBLOCK, or an error from sblock()/sbwait()/uiomove()).
 *
 * Locking: juggles the socket-buffer lock (hold_sblock), the inp read
 * lock (hold_rlock), and a TCB refcount (freecnt_applied); each goto
 * target below re-establishes the state these flags describe.
 */
int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at: MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL, thus uio is the copy method to userland). MSG_WAITALL - ??
	 * On the way out we may send out any combination of:
	 * MSG_NOTIFICATION MSG_EOR
	 *
	 */
	struct sctp_inpcb *inp = NULL;
	int my_len = 0;
	int cp_len = 0, error = 0;
	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
	struct mbuf *m = NULL, *embuf = NULL;
	struct sctp_tcb *stcb = NULL;
	int wakeup_read_socket = 0;
	int freecnt_applied = 0;	/* non-zero once we hold a refcnt on stcb */
	int out_flags = 0, in_flags = 0;
	int block_allowed = 1;		/* cleared for non-blocking sockets/flags */
	uint32_t freed_so_far = 0;	/* rwnd bytes released during this call */
	uint32_t copied_so_far = 0;	/* bytes handed to the user so far */
	int in_eeor_mode = 0;
	int no_rcv_needed = 0;		/* set when no window-update SACK is needed */
	uint32_t rwnd_req = 0;		/* freed-bytes threshold for a rwnd update */
	int hold_sblock = 0;		/* we currently hold SOCKBUF_LOCK */
	int hold_rlock = 0;		/* we currently hold SCTP_INP_READ_LOCK */
	int slen = 0;			/* uio_resid on entry, for logging */
	uint32_t held_length = 0;
	int sockbuf_lock = 0;		/* we hold the sblock() sleep lock */

	if (uio == NULL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if (msg_flags) {
		in_flags = *msg_flags;
		if (in_flags & MSG_PEEK)
			SCTP_STAT_INCR(sctps_read_peeks);
	} else {
		in_flags = 0;
	}
	slen = uio->uio_resid;

	/* Pull in and set up our int flags */
	if (in_flags & MSG_OOB) {
		/* Out of band's NOT supported */
		return (EOPNOTSUPP);
	}
	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
		/* Peeking only works with the uio copy path. */
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		return (EINVAL);
	}
	if ((in_flags & (MSG_DONTWAIT
	    | MSG_NBIO
	    )) ||
	    SCTP_SO_IS_NBIO(so)) {
		block_allowed = 0;
	}
	/* setup the endpoint */
	inp = (struct sctp_inpcb *)so->so_pcb;
	if (inp == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
		return (EFAULT);
	}
	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
	/* Must be at least a MTU's worth */
	if (rwnd_req < SCTP_MIN_RWND)
		rwnd_req = SCTP_MIN_RWND;
	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTER,
		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		sctp_misc_ints(SCTP_SORECV_ENTERPL,
		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
	}
	/* Serialize against other readers for the whole call. */
	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
	sockbuf_lock = 1;
	if (error) {
		goto release_unlocked;
	}
restart:
	/* Re-scan the read queue from the top (rlock/sblock released). */

restart_nosblocks:
	if (hold_sblock == 0) {
		SOCKBUF_LOCK(&so->so_rcv);
		hold_sblock = 1;
	}
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
			goto out;
		} else {
			if (so->so_rcv.sb_cc == 0) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
				/* indicate EOF */
				error = 0;
				goto out;
			}
		}
	}
	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
		/* we need to wait for data */
		if ((so->so_rcv.sb_cc == 0) &&
		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
				/*
				 * For active open side clear flags for
				 * re-use; passive open is blocked by
				 * connect.
				 */
				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
					/*
					 * You were aborted, passive side
					 * always hits here
					 */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
					error = ECONNRESET;
					/*
					 * You get this once if you are
					 * active open side
					 */
					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
						/*
						 * Remove flag if on the
						 * active open side
						 */
						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
					}
				}
				so->so_state &= ~(SS_ISCONNECTING |
				    SS_ISDISCONNECTING |
				    SS_ISCONFIRMING |
				    SS_ISCONNECTED);
				if (error == 0) {
					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
						error = ENOTCONN;
					} else {
						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
					}
				}
				goto out;
			}
		}
		error = sbwait(&so->so_rcv);
		if (error) {
			goto out;
		}
		held_length = 0;
		goto restart_nosblocks;
	} else if (so->so_rcv.sb_cc == 0) {
		/* Nothing buffered and we may not block. */
		if (so->so_error) {
			error = so->so_error;
			if ((in_flags & MSG_PEEK) == 0)
				so->so_error = 0;
		} else {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
					/*
					 * For active open side clear flags
					 * for re-use; passive open is
					 * blocked by connect.
					 */
					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
						/*
						 * You were aborted, passive
						 * side always hits here
						 */
						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
						error = ECONNRESET;
						/*
						 * You get this once if you
						 * are active open side
						 */
						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
							/*
							 * Remove flag if on
							 * the active open
							 * side
							 */
							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
						}
					}
					so->so_state &= ~(SS_ISCONNECTING |
					    SS_ISDISCONNECTING |
					    SS_ISCONFIRMING |
					    SS_ISCONNECTED);
					if (error == 0) {
						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
							error = ENOTCONN;
						} else {
							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
						}
					}
					goto out;
				}
			}
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
			error = EWOULDBLOCK;
		}
		goto out;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	/* we possibly have data we can read */
	/* sa_ignore FREED_MEMORY */
	control = TAILQ_FIRST(&inp->read_queue);
	if (control == NULL) {
		/*
		 * This could be happening since the appender did the
		 * increment but has not yet done the tailq insert onto the
		 * read_queue
		 */
		if (hold_rlock == 0) {
			SCTP_INP_READ_LOCK(inp);
			hold_rlock = 1;
		}
		control = TAILQ_FIRST(&inp->read_queue);
		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
#ifdef INVARIANTS
			panic("Huh, its non zero and nothing on control?");
#endif
			so->so_rcv.sb_cc = 0;
		}
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
		goto restart;
	}
	if ((control->length == 0) &&
	    (control->do_not_ref_stcb)) {
		/*
		 * Clean up code for freeing assoc that left behind a
		 * pdapi.. maybe a peer in EEOR that just closed after
		 * sending and never indicated a EOR.
		 */
		if (hold_rlock == 0) {
			hold_rlock = 1;
			SCTP_INP_READ_LOCK(inp);
		}
		control->held_length = 0;
		if (control->data) {
			/* Hmm there is data here .. fix */
			struct mbuf *m_tmp;
			int cnt = 0;

			/* Recount the chain and mark the end as added. */
			m_tmp = control->data;
			while (m_tmp) {
				cnt += SCTP_BUF_LEN(m_tmp);
				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
					control->tail_mbuf = m_tmp;
					control->end_added = 1;
				}
				m_tmp = SCTP_BUF_NEXT(m_tmp);
			}
			control->length = cnt;
		} else {
			/* remove it */
			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
			sctp_free_remote_addr(control->whoFrom);
			sctp_free_a_readq(stcb, control);
		}
		if (hold_rlock) {
			hold_rlock = 0;
			SCTP_INP_READ_UNLOCK(inp);
		}
		goto restart;
	}
	if (control->length == 0) {
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
		    (filling_sinfo)) {
			/* find a more suitable one then this */
			ctl = TAILQ_NEXT(control, next);
			while (ctl) {
				if ((ctl->stcb != control->stcb) && (ctl->length) &&
				    (ctl->some_taken ||
				    (ctl->spec_flags & M_NOTIFICATION) ||
				    ((ctl->do_not_ref_stcb == 0) &&
				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have a different TCB next, and there is data
					 * present. If we have already taken some (pdapi), OR we can
					 * ref the tcb and no delivery as started on this stream, we
					 * take it. Note we allow a notification on a different
					 * assoc to be delivered..
					 */
					control = ctl;
					goto found_one;
				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
					    (ctl->length) &&
					    ((ctl->some_taken) ||
					    ((ctl->do_not_ref_stcb == 0) &&
					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
				    ) {
					/*-
					 * If we have the same tcb, and there is data present, and we
					 * have the strm interleave feature present. Then if we have
					 * taken some (pdapi) or we can refer to that tcb AND we have
					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notification on the same assoc to
					 * be delivered.
					 */
					control = ctl;
					goto found_one;
				}
				ctl = TAILQ_NEXT(ctl, next);
			}
		}
		/*
		 * if we reach here, not suitable replacement is available
		 * <or> fragment interleave is NOT on. So stuff the sb_cc
		 * into the our held count, and its time to sleep again.
		 */
		held_length = so->so_rcv.sb_cc;
		control->held_length = so->so_rcv.sb_cc;
		goto restart;
	}
	/* Clear the held length since there is something to read */
	control->held_length = 0;
	if (hold_rlock) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
found_one:
	/*
	 * If we reach here, control has a some data for us to read off.
	 * Note that stcb COULD be NULL.
	 */
	control->some_taken++;
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	stcb = control->stcb;
	if (stcb) {
		if ((control->do_not_ref_stcb == 0) &&
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			/* Assoc is dying; don't touch it unless we already hold a ref. */
			if (freecnt_applied == 0)
				stcb = NULL;
		} else if (control->do_not_ref_stcb == 0) {
			/* you can't free it on me please */
			/*
			 * The lock on the socket buffer protects us so the
			 * free code will stop. But since we used the
			 * socketbuf lock and the sender uses the tcb_lock
			 * to increment, we need to use the atomic add to
			 * the refcnt
			 */
			if (freecnt_applied) {
#ifdef INVARIANTS
				panic("refcnt already incremented");
#else
				printf("refcnt already incremented?\n");
#endif
			} else {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				freecnt_applied = 1;
			}
			/*
			 * Setup to remember how much we have not yet told
			 * the peer our rwnd has opened up. Note we grab the
			 * value from the tcb from last time. Note too that
			 * sack sending clears this when a sack is sent,
			 * which is fine. Once we hit the rwnd_req, we then
			 * will go to the sctp_user_rcvd() that will not
			 * lock until it KNOWs it MUST send a WUP-SACK.
			 */
			freed_so_far = stcb->freed_by_sorcv_sincelast;
			stcb->freed_by_sorcv_sincelast = 0;
		}
	}
	if (stcb &&
	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
	    control->do_not_ref_stcb == 0) {
		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
	}
	/* First lets get off the sinfo and sockaddr info */
	if ((sinfo) && filling_sinfo) {
		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
		nxt = TAILQ_NEXT(control, next);
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
			/* Extended receive info: describe the next queued message too. */
			struct sctp_extrcvinfo *s_extra;

			s_extra = (struct sctp_extrcvinfo *)sinfo;
			if ((nxt) &&
			    (nxt->length)) {
				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
				if (nxt->sinfo_flags & SCTP_UNORDERED) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
				}
				if (nxt->spec_flags & M_NOTIFICATION) {
					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
				}
				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
				s_extra->sreinfo_next_length = nxt->length;
				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
				if (nxt->tail_mbuf != NULL) {
					if (nxt->end_added) {
						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
					}
				}
			} else {
				/*
				 * we explicitly 0 this, since the memcpy
				 * got some other things beyond the older
				 * sinfo_ that is on the control's structure
				 * :-D
				 */
				nxt = NULL;
				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
				s_extra->sreinfo_next_aid = 0;
				s_extra->sreinfo_next_length = 0;
				s_extra->sreinfo_next_ppid = 0;
				s_extra->sreinfo_next_stream = 0;
			}
		}
		/*
		 * update off the real current cum-ack, if we have an stcb.
		 */
		if ((control->do_not_ref_stcb == 0) && stcb)
			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
		/*
		 * mask off the high bits, we keep the actual chunk bits in
		 * there.
		 */
		sinfo->sinfo_flags &= 0x00ff;
		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
			sinfo->sinfo_flags |= SCTP_UNORDERED;
		}
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	{
		/* Record this read in the endpoint's circular TSN read log. */
		int index, newindex;
		struct sctp_pcbtsn_rlog *entry;

		do {
			index = inp->readlog_index;
			newindex = index + 1;
			if (newindex >= SCTP_READ_LOG_SIZE) {
				newindex = 0;
			}
		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
		entry = &inp->readlog[index];
		entry->vtag = control->sinfo_assoc_id;
		entry->strm = control->sinfo_stream;
		entry->seq = control->sinfo_ssn;
		entry->sz = control->length;
		entry->flgs = control->sinfo_flags;
	}
#endif
	if (fromlen && from) {
		/* Copy out the sender's address, capped at the caller's buffer size. */
		struct sockaddr *to;

#ifdef INET
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in *)from)->sin_port = control->port_from;
#else
		/* No AF_INET use AF_INET6 */
		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
#endif

		to = from;
#if defined(INET) && defined(INET6)
		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
		    (to->sa_family == AF_INET) &&
		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
			/* Caller wants v4 addresses presented as v4-mapped v6. */
			struct sockaddr_in *sin;
			struct sockaddr_in6 sin6;

			sin = (struct sockaddr_in *)to;
			bzero(&sin6, sizeof(sin6));
			sin6.sin6_family = AF_INET6;
			sin6.sin6_len = sizeof(struct sockaddr_in6);
			sin6.sin6_addr.s6_addr32[2] = ntohl(0x0000ffff);
			bcopy(&sin->sin_addr,
			    &sin6.sin6_addr.s6_addr32[3],
			    sizeof(sin6.sin6_addr.s6_addr32[3]));
			sin6.sin6_port = sin->sin_port;
			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
		}
#endif
#if defined(INET6)
		{
			struct sockaddr_in6 lsa6, *to6;

			to6 = (struct sockaddr_in6 *)to;
			sctp_recover_scope_mac(to6, (&lsa6));
		}
#endif
	}
	/* now copy out what data we can */
	if (mp == NULL) {
		/* copy out each mbuf in the chain up to length */
get_more_data:
		m = control->data;
		while (m) {
			/* Move out all we can */
			cp_len = (int)uio->uio_resid;
			my_len = (int)SCTP_BUF_LEN(m);
			if (cp_len > my_len) {
				/* not enough in this buf */
				cp_len = my_len;
			}
			if (hold_rlock) {
				SCTP_INP_READ_UNLOCK(inp);
				hold_rlock = 0;
			}
			if (cp_len > 0)
				error = uiomove(mtod(m, char *), cp_len, uio);
			/* re-read */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				goto release;
			}
			if ((control->do_not_ref_stcb == 0) && stcb &&
			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
				no_rcv_needed = 1;
			}
			if (error) {
				/* error we are out of here */
				goto release;
			}
			if ((SCTP_BUF_NEXT(m) == NULL) &&
			    (cp_len >= SCTP_BUF_LEN(m)) &&
			    ((control->end_added == 0) ||
			    (control->end_added &&
			    (TAILQ_NEXT(control, next) == NULL)))
			    ) {
				/* About to consume the last mbuf: take the read lock
				 * so concurrent appenders can't race us. */
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
			if (cp_len == SCTP_BUF_LEN(m)) {
				if ((SCTP_BUF_NEXT(m) == NULL) &&
				    (control->end_added)) {
					out_flags |= MSG_EOR;
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				/* we ate up the mbuf */
				if (in_flags & MSG_PEEK) {
					/* just looking */
					m = SCTP_BUF_NEXT(m);
					copied_so_far += cp_len;
				} else {
					/* dispose of the mbuf */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
					}
					sctp_sbfree(control, stcb, &so->so_rcv, m);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv,
						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
					}
					embuf = m;
					copied_so_far += cp_len;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					atomic_subtract_int(&control->length, cp_len);
					control->data = sctp_m_free(m);
					m = control->data;
					/*
					 * been through it all, must hold sb
					 * lock ok to null tail
					 */
					if (control->data == NULL) {
#ifdef INVARIANTS
						if ((control->end_added == 0) ||
						    (TAILQ_NEXT(control, next) == NULL)) {
							/*
							 * If the end is not
							 * added, OR the
							 * next is NOT null
							 * we MUST have the
							 * lock.
							 */
							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
								panic("Hmm we don't own the lock?");
							}
						}
#endif
						control->tail_mbuf = NULL;
#ifdef INVARIANTS
						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
							panic("end_added, nothing left and no MSG_EOR");
						}
#endif
					}
				}
			} else {
				/* Do we need to trim the mbuf? */
				if (control->spec_flags & M_NOTIFICATION) {
					out_flags |= MSG_NOTIFICATION;
				}
				if ((in_flags & MSG_PEEK) == 0) {
					/* Partial mbuf consumed: advance its data pointer. */
					SCTP_BUF_RESV_UF(m, cp_len);
					SCTP_BUF_LEN(m) -= cp_len;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
					}
					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
					if ((control->do_not_ref_stcb == 0) &&
					    stcb) {
						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
					}
					copied_so_far += cp_len;
					embuf = m;
					freed_so_far += cp_len;
					freed_so_far += MSIZE;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
						    SCTP_LOG_SBRESULT, 0);
					}
					atomic_subtract_int(&control->length, cp_len);
				} else {
					copied_so_far += cp_len;
				}
			}
			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
				break;
			}
			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
			    (control->do_not_ref_stcb == 0) &&
			    (freed_so_far >= rwnd_req)) {
				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
			}
		}		/* end while(m) */
		/*
		 * At this point we have looked at it all and we either have
		 * a MSG_EOR/or read all the user wants... <OR>
		 * control->length == 0.
		 */
		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
			/* we are done with this control */
			if (control->length == 0) {
				if (control->data) {
#ifdef INVARIANTS
					panic("control->data not null at read eor?");
#else
					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
					sctp_m_freem(control->data);
					control->data = NULL;
#endif
				}
		done_with_control:
				/* Unlink and free a fully-consumed control block. */
				if (TAILQ_NEXT(control, next) == NULL) {
					/*
					 * If we don't have a next we need a
					 * lock, if there is a next
					 * interrupt is filling ahead of us
					 * and we don't need a lock to
					 * remove this guy (which is the
					 * head of the queue).
					 */
					if (hold_rlock == 0) {
						SCTP_INP_READ_LOCK(inp);
						hold_rlock = 1;
					}
				}
				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hidden data */
				if (control->held_length) {
					held_length = 0;
					control->held_length = 0;
					wakeup_read_socket = 1;
				}
				if (control->aux_data) {
					sctp_m_free(control->aux_data);
					control->aux_data = NULL;
				}
				no_rcv_needed = control->do_not_ref_stcb;
				sctp_free_remote_addr(control->whoFrom);
				control->data = NULL;
				sctp_free_a_readq(stcb, control);
				control = NULL;
				if ((freed_so_far >= rwnd_req) &&
				    (no_rcv_needed == 0))
					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);

			} else {
				/*
				 * The user did not read all of this
				 * message, turn off the returned MSG_EOR
				 * since we are leaving more behind on the
				 * control to read.
				 */
#ifdef INVARIANTS
				if (control->end_added &&
				    (control->data == NULL) &&
				    (control->tail_mbuf == NULL)) {
					panic("Gak, control->length is corrupt?");
				}
#endif
				no_rcv_needed = control->do_not_ref_stcb;
				out_flags &= ~MSG_EOR;
			}
		}
		if (out_flags & MSG_EOR) {
			goto release;
		}
		if ((uio->uio_resid == 0) ||
		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
		    ) {
			goto release;
		}
		/*
		 * If I hit here the receiver wants more and this message is
		 * NOT done (pd-api). So two questions. Can we block? if not
		 * we are done. Did the user NOT set MSG_WAITALL?
		 */
		if (block_allowed == 0) {
			goto release;
		}
		/*
		 * We need to wait for more data a few things: - We don't
		 * sbunlock() so we don't get someone else reading. - We
		 * must be sure to account for the case where what is added
		 * is NOT to our control when we wakeup.
		 */

		/*
		 * Do we need to tell the transport a rwnd update might be
		 * needed before we go to sleep?
		 */
		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
		    ((freed_so_far >= rwnd_req) &&
		    (control->do_not_ref_stcb == 0) &&
		    (no_rcv_needed == 0))) {
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
		}
wait_some_more:
		/* Sleep until more of this partially-delivered message arrives. */
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
			goto release;
		}
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
			goto release;

		if (hold_rlock == 1) {
			SCTP_INP_READ_UNLOCK(inp);
			hold_rlock = 0;
		}
		if (hold_sblock == 0) {
			SOCKBUF_LOCK(&so->so_rcv);
			hold_sblock = 1;
		}
		if ((copied_so_far) && (control->length == 0) &&
		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
		    ) {
			goto release;
		}
		if (so->so_rcv.sb_cc <= control->held_length) {
			error = sbwait(&so->so_rcv);
			if (error) {
				goto release;
			}
			control->held_length = 0;
		}
		if (hold_sblock) {
			SOCKBUF_UNLOCK(&so->so_rcv);
			hold_sblock = 0;
		}
		if (control->length == 0) {
			/* still nothing here */
			if (control->end_added == 1) {
				/* he aborted, or is done i.e.did a shutdown */
				out_flags |= MSG_EOR;
				if (control->pdapi_aborted) {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;

					out_flags |= MSG_TRUNC;
				} else {
					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
				}
				goto done_with_control;
			}
			if (so->so_rcv.sb_cc > held_length) {
				control->held_length = so->so_rcv.sb_cc;
				held_length = 0;
			}
			goto wait_some_more;
		} else if (control->data == NULL) {
			/*
			 * we must re-sync since data is probably being
			 * added
			 */
			SCTP_INP_READ_LOCK(inp);
			if ((control->length > 0) && (control->data == NULL)) {
				/*
				 * big trouble.. we have the lock and its
				 * corrupt?
				 */
#ifdef INVARIANTS
				panic("Impossible data==NULL length !=0");
#endif
				out_flags |= MSG_EOR;
				out_flags |= MSG_TRUNC;
				control->length = 0;
				SCTP_INP_READ_UNLOCK(inp);
				goto done_with_control;
			}
			SCTP_INP_READ_UNLOCK(inp);
			/* We will fall around to get more data */
		}
		goto get_more_data;
	} else {
		/*-
		 * Give caller back the mbuf chain,
		 * store in uio_resid the length
		 */
		wakeup_read_socket = 0;
		if ((control->end_added == 0) ||
		    (TAILQ_NEXT(control, next) == NULL)) {
			/* Need to get rlock */
			if (hold_rlock == 0) {
				SCTP_INP_READ_LOCK(inp);
				hold_rlock = 1;
			}
		}
		if (control->end_added) {
			out_flags |= MSG_EOR;
			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
		}
		if (control->spec_flags & M_NOTIFICATION) {
			out_flags |= MSG_NOTIFICATION;
		}
		uio->uio_resid = control->length;
		*mp = control->data;
		m = control->data;
		while (m) {
			/* Account each mbuf out of the socket buffer before handoff. */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
			}
			sctp_sbfree(control, stcb, &so->so_rcv, m);
			freed_so_far += SCTP_BUF_LEN(m);
			freed_so_far += MSIZE;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&so->so_rcv,
				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control->data = control->tail_mbuf = NULL;
		control->length = 0;
		if (out_flags & MSG_EOR) {
			/* Done with this control */
			goto done_with_control;
		}
	}
release:
	/* Normal exit: drop any held locks, then the reader sleep-lock. */
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock == 1) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	sbunlock(&so->so_rcv);
	sockbuf_lock = 0;

release_unlocked:
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
		if ((freed_so_far >= rwnd_req) &&
		    (control && (control->do_not_ref_stcb == 0)) &&
		    (no_rcv_needed == 0))
			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
	}
	if (msg_flags)
		*msg_flags = out_flags;
out:
	/* Common exit: clean up locks, refcounts, and logging. */
	if (((out_flags & MSG_EOR) == 0) &&
	    ((in_flags & MSG_PEEK) == 0) &&
	    (sinfo) &&
	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
		struct sctp_extrcvinfo *s_extra;

		s_extra = (struct sctp_extrcvinfo *)sinfo;
		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
	}
	if (hold_rlock == 1) {
		SCTP_INP_READ_UNLOCK(inp);
		hold_rlock = 0;
	}
	if (hold_sblock) {
		SOCKBUF_UNLOCK(&so->so_rcv);
		hold_sblock = 0;
	}
	if (sockbuf_lock) {
		sbunlock(&so->so_rcv);
	}
	if (freecnt_applied) {
		/*
		 * The lock on the socket buffer protects us so the free
		 * code will stop. But since we used the socketbuf lock and
		 * the sender uses the tcb_lock to increment, we need to use
		 * the atomic add to the refcnt.
		 */
		if (stcb == NULL) {
			panic("stcb for refcnt has gone NULL?");
		}
		atomic_add_int(&stcb->asoc.refcnt, -1);
		freecnt_applied = 0;
		/* Save the value back for next time */
		stcb->freed_by_sorcv_sincelast = freed_so_far;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
		if (stcb) {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    stcb->asoc.my_rwnd,
			    so->so_rcv.sb_cc);
		} else {
			sctp_misc_ints(SCTP_SORECV_DONE,
			    freed_so_far,
			    ((uio) ? (slen - uio->uio_resid) : slen),
			    0,
			    so->so_rcv.sb_cc);
		}
	}
	if (wakeup_read_socket) {
		sctp_sorwakeup(inp, so);
	}
	return (error);
}
5932 
5933 
5934 #ifdef SCTP_MBUF_LOGGING
5935 struct mbuf *
5936 sctp_m_free(struct mbuf *m)
5937 {
5938 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
5939 		if (SCTP_BUF_IS_EXTENDED(m)) {
5940 			sctp_log_mb(m, SCTP_MBUF_IFREE);
5941 		}
5942 	}
5943 	return (m_free(m));
5944 }
5945 
5946 void
5947 sctp_m_freem(struct mbuf *mb)
5948 {
5949 	while (mb != NULL)
5950 		mb = sctp_m_free(mb);
5951 }
5952 
5953 #endif
5954 
/*
 * Queue a peer-set-primary request for every association holding the
 * given local address.  The work is handed to the address work queue and
 * performed asynchronously by the iterator.  Returns 0 on success,
 * EADDRNOTAVAIL if the address is not local, or ENOMEM if no work-queue
 * entry could be allocated.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	/* Resolve the address to its interface-address entry. */
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa while it sits on the work queue. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_IPI_ITERATOR_WQ_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	/* Kick the address work-queue timer so the entry gets serviced. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
	return (0);
}
6001 
6002 
6003 
6004 
6005 int
6006 sctp_soreceive(struct socket *so,
6007     struct sockaddr **psa,
6008     struct uio *uio,
6009     struct mbuf **mp0,
6010     struct mbuf **controlp,
6011     int *flagsp)
6012 {
6013 	int error, fromlen;
6014 	uint8_t sockbuf[256];
6015 	struct sockaddr *from;
6016 	struct sctp_extrcvinfo sinfo;
6017 	int filling_sinfo = 1;
6018 	struct sctp_inpcb *inp;
6019 
6020 	inp = (struct sctp_inpcb *)so->so_pcb;
6021 	/* pickup the assoc we are reading from */
6022 	if (inp == NULL) {
6023 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6024 		return (EINVAL);
6025 	}
6026 	if ((sctp_is_feature_off(inp,
6027 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6028 	    (controlp == NULL)) {
6029 		/* user does not want the sndrcv ctl */
6030 		filling_sinfo = 0;
6031 	}
6032 	if (psa) {
6033 		from = (struct sockaddr *)sockbuf;
6034 		fromlen = sizeof(sockbuf);
6035 		from->sa_len = 0;
6036 	} else {
6037 		from = NULL;
6038 		fromlen = 0;
6039 	}
6040 
6041 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6042 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6043 	if ((controlp) && (filling_sinfo)) {
6044 		/* copy back the sinfo in a CMSG format */
6045 		if (filling_sinfo)
6046 			*controlp = sctp_build_ctl_nchunk(inp,
6047 			    (struct sctp_sndrcvinfo *)&sinfo);
6048 		else
6049 			*controlp = NULL;
6050 	}
6051 	if (psa) {
6052 		/* copy back the address info */
6053 		if (from && from->sa_len) {
6054 			*psa = sodupsockaddr(from, M_NOWAIT);
6055 		} else {
6056 			*psa = NULL;
6057 		}
6058 	}
6059 	return (error);
6060 }
6061 
6062 
6063 int
6064 sctp_l_soreceive(struct socket *so,
6065     struct sockaddr **name,
6066     struct uio *uio,
6067     char **controlp,
6068     int *controllen,
6069     int *flag)
6070 {
6071 	int error, fromlen;
6072 	uint8_t sockbuf[256];
6073 	struct sockaddr *from;
6074 	struct sctp_extrcvinfo sinfo;
6075 	int filling_sinfo = 1;
6076 	struct sctp_inpcb *inp;
6077 
6078 	inp = (struct sctp_inpcb *)so->so_pcb;
6079 	/* pickup the assoc we are reading from */
6080 	if (inp == NULL) {
6081 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6082 		return (EINVAL);
6083 	}
6084 	if ((sctp_is_feature_off(inp,
6085 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6086 	    (controlp == NULL)) {
6087 		/* user does not want the sndrcv ctl */
6088 		filling_sinfo = 0;
6089 	}
6090 	if (name) {
6091 		from = (struct sockaddr *)sockbuf;
6092 		fromlen = sizeof(sockbuf);
6093 		from->sa_len = 0;
6094 	} else {
6095 		from = NULL;
6096 		fromlen = 0;
6097 	}
6098 
6099 	error = sctp_sorecvmsg(so, uio,
6100 	    (struct mbuf **)NULL,
6101 	    from, fromlen, flag,
6102 	    (struct sctp_sndrcvinfo *)&sinfo,
6103 	    filling_sinfo);
6104 	if ((controlp) && (filling_sinfo)) {
6105 		/*
6106 		 * copy back the sinfo in a CMSG format note that the caller
6107 		 * has reponsibility for freeing the memory.
6108 		 */
6109 		if (filling_sinfo)
6110 			*controlp = sctp_build_ctl_cchunk(inp,
6111 			    controllen,
6112 			    (struct sctp_sndrcvinfo *)&sinfo);
6113 	}
6114 	if (name) {
6115 		/* copy back the address info */
6116 		if (from && from->sa_len) {
6117 			*name = sodupsockaddr(from, M_WAIT);
6118 		} else {
6119 			*name = NULL;
6120 		}
6121 	}
6122 	return (error);
6123 }
6124 
6125 
6126 
6127 
6128 
6129 
6130 
/*
 * sctp_connectx() helper: add each address from the packed sockaddr
 * array "addr" (totaddr entries) as a confirmed remote address of the
 * association.  Returns the number of addresses added.  If
 * sctp_add_remote_addr() fails, the association is freed, *error is set
 * to ENOBUFS and the caller must not touch stcb again.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/*
		 * NOTE(review): for an unrecognized sa_family, incr keeps
		 * its previous value (0 on the first entry), so sa may not
		 * advance and the same bytes can be examined repeatedly —
		 * confirm callers pre-validate that only v4/v6 entries are
		 * passed in (sctp_connectx_helper_find appears to do so).
		 */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6171 
6172 struct sctp_tcb *
6173 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6174     int *totaddr, int *num_v4, int *num_v6, int *error,
6175     int limit, int *bad_addr)
6176 {
6177 	struct sockaddr *sa;
6178 	struct sctp_tcb *stcb = NULL;
6179 	size_t incr, at, i;
6180 
6181 	at = incr = 0;
6182 	sa = addr;
6183 	*error = *num_v6 = *num_v4 = 0;
6184 	/* account and validate addresses */
6185 	for (i = 0; i < (size_t)*totaddr; i++) {
6186 		if (sa->sa_family == AF_INET) {
6187 			(*num_v4) += 1;
6188 			incr = sizeof(struct sockaddr_in);
6189 			if (sa->sa_len != incr) {
6190 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6191 				*error = EINVAL;
6192 				*bad_addr = 1;
6193 				return (NULL);
6194 			}
6195 		} else if (sa->sa_family == AF_INET6) {
6196 			struct sockaddr_in6 *sin6;
6197 
6198 			sin6 = (struct sockaddr_in6 *)sa;
6199 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6200 				/* Must be non-mapped for connectx */
6201 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6202 				*error = EINVAL;
6203 				*bad_addr = 1;
6204 				return (NULL);
6205 			}
6206 			(*num_v6) += 1;
6207 			incr = sizeof(struct sockaddr_in6);
6208 			if (sa->sa_len != incr) {
6209 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6210 				*error = EINVAL;
6211 				*bad_addr = 1;
6212 				return (NULL);
6213 			}
6214 		} else {
6215 			*totaddr = i;
6216 			/* we are done */
6217 			break;
6218 		}
6219 		SCTP_INP_INCR_REF(inp);
6220 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6221 		if (stcb != NULL) {
6222 			/* Already have or am bring up an association */
6223 			return (stcb);
6224 		} else {
6225 			SCTP_INP_DECR_REF(inp);
6226 		}
6227 		if ((at + incr) > (size_t)limit) {
6228 			*totaddr = i;
6229 			break;
6230 		}
6231 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6232 	}
6233 	return ((struct sctp_tcb *)NULL);
6234 }
6235 
6236 /*
6237  * sctp_bindx(ADD) for one address.
6238  * assumes all arguments are valid/checked by caller.
6239  */
6240 void
6241 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6242     struct sockaddr *sa, sctp_assoc_t assoc_id,
6243     uint32_t vrf_id, int *error, void *p)
6244 {
6245 	struct sockaddr *addr_touse;
6246 
6247 #ifdef INET6
6248 	struct sockaddr_in sin;
6249 
6250 #endif
6251 
6252 	/* see if we're bound all already! */
6253 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6254 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6255 		*error = EINVAL;
6256 		return;
6257 	}
6258 	addr_touse = sa;
6259 #if defined(INET6)
6260 	if (sa->sa_family == AF_INET6) {
6261 		struct sockaddr_in6 *sin6;
6262 
6263 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6264 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6265 			*error = EINVAL;
6266 			return;
6267 		}
6268 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6269 			/* can only bind v6 on PF_INET6 sockets */
6270 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6271 			*error = EINVAL;
6272 			return;
6273 		}
6274 		sin6 = (struct sockaddr_in6 *)addr_touse;
6275 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6276 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6277 			    SCTP_IPV6_V6ONLY(inp)) {
6278 				/* can't bind v4-mapped on PF_INET sockets */
6279 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6280 				*error = EINVAL;
6281 				return;
6282 			}
6283 			in6_sin6_2_sin(&sin, sin6);
6284 			addr_touse = (struct sockaddr *)&sin;
6285 		}
6286 	}
6287 #endif
6288 	if (sa->sa_family == AF_INET) {
6289 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6290 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6291 			*error = EINVAL;
6292 			return;
6293 		}
6294 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6295 		    SCTP_IPV6_V6ONLY(inp)) {
6296 			/* can't bind v4 on PF_INET sockets */
6297 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6298 			*error = EINVAL;
6299 			return;
6300 		}
6301 	}
6302 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6303 		if (p == NULL) {
6304 			/* Can't get proc for Net/Open BSD */
6305 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6306 			*error = EINVAL;
6307 			return;
6308 		}
6309 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6310 		return;
6311 	}
6312 	/*
6313 	 * No locks required here since bind and mgmt_ep_sa all do their own
6314 	 * locking. If we do something for the FIX: below we may need to
6315 	 * lock in that case.
6316 	 */
6317 	if (assoc_id == 0) {
6318 		/* add the address */
6319 		struct sctp_inpcb *lep;
6320 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6321 
6322 		/* validate the incoming port */
6323 		if ((lsin->sin_port != 0) &&
6324 		    (lsin->sin_port != inp->sctp_lport)) {
6325 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6326 			*error = EINVAL;
6327 			return;
6328 		} else {
6329 			/* user specified 0 port, set it to existing port */
6330 			lsin->sin_port = inp->sctp_lport;
6331 		}
6332 
6333 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6334 		if (lep != NULL) {
6335 			/*
6336 			 * We must decrement the refcount since we have the
6337 			 * ep already and are binding. No remove going on
6338 			 * here.
6339 			 */
6340 			SCTP_INP_DECR_REF(inp);
6341 		}
6342 		if (lep == inp) {
6343 			/* already bound to it.. ok */
6344 			return;
6345 		} else if (lep == NULL) {
6346 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6347 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6348 			    SCTP_ADD_IP_ADDRESS,
6349 			    vrf_id, NULL);
6350 		} else {
6351 			*error = EADDRINUSE;
6352 		}
6353 		if (*error)
6354 			return;
6355 	} else {
6356 		/*
6357 		 * FIX: decide whether we allow assoc based bindx
6358 		 */
6359 	}
6360 }
6361 
6362 /*
6363  * sctp_bindx(DELETE) for one address.
6364  * assumes all arguments are valid/checked by caller.
6365  */
/*
 * Validate the address and hand it to sctp_addr_mgmt_ep_sa() for
 * removal from the endpoint's bound-address list.  On failure *error is
 * set to an errno value.  Note: the "so" parameter is currently unused
 * here (kept for signature parity with sctp_bindx_add_address).
 */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6)
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Unmap the v4-mapped address and delete its v4 form. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6444 
6445 /*
6446  * returns the valid local address count for an assoc, taking into account
6447  * all scoping rules
6448  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	ipv4_addr_legal = ipv6_addr_legal = 0;
	/*
	 * A V6-bound endpoint may use v6 addresses, and also v4 unless it
	 * is v6-only; a non-V6 endpoint may only use v4.
	 */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6580 
6581 #if defined(SCTP_LOCAL_TRACE_BUF)
6582 
/*
 * Append one record to the circular in-kernel trace buffer.  A slot is
 * claimed with a lock-free compare-and-swap loop on the shared index,
 * so concurrent callers each obtain a distinct slot; the claimed slot
 * is then filled without further synchronization.  "str" is accepted
 * for API symmetry but unused here.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* Claim the next slot; the index wraps back to 1 past the end. */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* An index at/past the end maps to slot 0 (the wrap-around slot). */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6608 
6609 #endif
/*
 * UDP tunneling support (binding the tunneling ports, etc.) still needs
 * to be added here.  Until then, stopping is a no-op and starting
 * returns an error.
 */
6615 
void
sctp_over_udp_stop(void)
{
	/* Nothing to tear down yet: UDP tunneling is not implemented. */
}
int
sctp_over_udp_start(void)
{
	/* UDP tunneling is not implemented yet, so always report failure. */
	return (-1);
}
6626