xref: /freebsd/sys/netinet/sctputil.c (revision d8b878873e7aa8df1972cc6a642804b17eb61087)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_cc_functions.h>
51 
52 #define NUMBER_OF_MTU_SIZES 18
53 
54 
55 #if defined(__Windows__) && !defined(SCTP_LOCAL_TRACE_BUF)
56 #include "eventrace_netinet.h"
57 #include "sctputil.tmh"		/* this is the file that will be auto
58 				 * generated */
59 #else
60 #ifndef KTR_SCTP
61 #define KTR_SCTP KTR_SUBSYS
62 #endif
63 #endif
64 
65 void
66 sctp_sblog(struct sockbuf *sb,
67     struct sctp_tcb *stcb, int from, int incr)
68 {
69 	struct sctp_cwnd_log sctp_clog;
70 
71 	sctp_clog.x.sb.stcb = stcb;
72 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
73 	if (stcb)
74 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
75 	else
76 		sctp_clog.x.sb.stcb_sbcc = 0;
77 	sctp_clog.x.sb.incr = incr;
78 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
79 	    SCTP_LOG_EVENT_SB,
80 	    from,
81 	    sctp_clog.x.misc.log1,
82 	    sctp_clog.x.misc.log2,
83 	    sctp_clog.x.misc.log3,
84 	    sctp_clog.x.misc.log4);
85 }
86 
87 void
88 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
89 {
90 	struct sctp_cwnd_log sctp_clog;
91 
92 	sctp_clog.x.close.inp = (void *)inp;
93 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
94 	if (stcb) {
95 		sctp_clog.x.close.stcb = (void *)stcb;
96 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
97 	} else {
98 		sctp_clog.x.close.stcb = 0;
99 		sctp_clog.x.close.state = 0;
100 	}
101 	sctp_clog.x.close.loc = loc;
102 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
103 	    SCTP_LOG_EVENT_CLOSE,
104 	    0,
105 	    sctp_clog.x.misc.log1,
106 	    sctp_clog.x.misc.log2,
107 	    sctp_clog.x.misc.log3,
108 	    sctp_clog.x.misc.log4);
109 }
110 
111 
112 void
113 rto_logging(struct sctp_nets *net, int from)
114 {
115 	struct sctp_cwnd_log sctp_clog;
116 
117 	memset(&sctp_clog, 0, sizeof(sctp_clog));
118 	sctp_clog.x.rto.net = (void *)net;
119 	sctp_clog.x.rto.rtt = net->prev_rtt;
120 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
121 	    SCTP_LOG_EVENT_RTT,
122 	    from,
123 	    sctp_clog.x.misc.log1,
124 	    sctp_clog.x.misc.log2,
125 	    sctp_clog.x.misc.log3,
126 	    sctp_clog.x.misc.log4);
127 
128 }
129 
130 void
131 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
132 {
133 	struct sctp_cwnd_log sctp_clog;
134 
135 	sctp_clog.x.strlog.stcb = stcb;
136 	sctp_clog.x.strlog.n_tsn = tsn;
137 	sctp_clog.x.strlog.n_sseq = sseq;
138 	sctp_clog.x.strlog.e_tsn = 0;
139 	sctp_clog.x.strlog.e_sseq = 0;
140 	sctp_clog.x.strlog.strm = stream;
141 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
142 	    SCTP_LOG_EVENT_STRM,
143 	    from,
144 	    sctp_clog.x.misc.log1,
145 	    sctp_clog.x.misc.log2,
146 	    sctp_clog.x.misc.log3,
147 	    sctp_clog.x.misc.log4);
148 
149 }
150 
151 void
152 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
153 {
154 	struct sctp_cwnd_log sctp_clog;
155 
156 	sctp_clog.x.nagle.stcb = (void *)stcb;
157 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
158 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
159 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
160 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
161 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
162 	    SCTP_LOG_EVENT_NAGLE,
163 	    action,
164 	    sctp_clog.x.misc.log1,
165 	    sctp_clog.x.misc.log2,
166 	    sctp_clog.x.misc.log3,
167 	    sctp_clog.x.misc.log4);
168 }
169 
170 
171 void
172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
173 {
174 	struct sctp_cwnd_log sctp_clog;
175 
176 	sctp_clog.x.sack.cumack = cumack;
177 	sctp_clog.x.sack.oldcumack = old_cumack;
178 	sctp_clog.x.sack.tsn = tsn;
179 	sctp_clog.x.sack.numGaps = gaps;
180 	sctp_clog.x.sack.numDups = dups;
181 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
182 	    SCTP_LOG_EVENT_SACK,
183 	    from,
184 	    sctp_clog.x.misc.log1,
185 	    sctp_clog.x.misc.log2,
186 	    sctp_clog.x.misc.log3,
187 	    sctp_clog.x.misc.log4);
188 }
189 
190 void
191 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
192 {
193 	struct sctp_cwnd_log sctp_clog;
194 
195 	memset(&sctp_clog, 0, sizeof(sctp_clog));
196 	sctp_clog.x.map.base = map;
197 	sctp_clog.x.map.cum = cum;
198 	sctp_clog.x.map.high = high;
199 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
200 	    SCTP_LOG_EVENT_MAP,
201 	    from,
202 	    sctp_clog.x.misc.log1,
203 	    sctp_clog.x.misc.log2,
204 	    sctp_clog.x.misc.log3,
205 	    sctp_clog.x.misc.log4);
206 }
207 
208 void
209 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
210     int from)
211 {
212 	struct sctp_cwnd_log sctp_clog;
213 
214 	memset(&sctp_clog, 0, sizeof(sctp_clog));
215 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
216 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
217 	sctp_clog.x.fr.tsn = tsn;
218 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
219 	    SCTP_LOG_EVENT_FR,
220 	    from,
221 	    sctp_clog.x.misc.log1,
222 	    sctp_clog.x.misc.log2,
223 	    sctp_clog.x.misc.log3,
224 	    sctp_clog.x.misc.log4);
225 
226 }
227 
228 
229 void
230 sctp_log_mb(struct mbuf *m, int from)
231 {
232 	struct sctp_cwnd_log sctp_clog;
233 
234 	sctp_clog.x.mb.mp = m;
235 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
236 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
237 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
238 	if (SCTP_BUF_IS_EXTENDED(m)) {
239 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
240 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
241 	} else {
242 		sctp_clog.x.mb.ext = 0;
243 		sctp_clog.x.mb.refcnt = 0;
244 	}
245 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
246 	    SCTP_LOG_EVENT_MBUF,
247 	    from,
248 	    sctp_clog.x.misc.log1,
249 	    sctp_clog.x.misc.log2,
250 	    sctp_clog.x.misc.log3,
251 	    sctp_clog.x.misc.log4);
252 }
253 
254 
255 void
256 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
257     int from)
258 {
259 	struct sctp_cwnd_log sctp_clog;
260 
261 	if (control == NULL) {
262 		SCTP_PRINTF("Gak log of NULL?\n");
263 		return;
264 	}
265 	sctp_clog.x.strlog.stcb = control->stcb;
266 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
267 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
268 	sctp_clog.x.strlog.strm = control->sinfo_stream;
269 	if (poschk != NULL) {
270 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
271 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
272 	} else {
273 		sctp_clog.x.strlog.e_tsn = 0;
274 		sctp_clog.x.strlog.e_sseq = 0;
275 	}
276 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
277 	    SCTP_LOG_EVENT_STRM,
278 	    from,
279 	    sctp_clog.x.misc.log1,
280 	    sctp_clog.x.misc.log2,
281 	    sctp_clog.x.misc.log3,
282 	    sctp_clog.x.misc.log4);
283 
284 }
285 
286 void
287 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
288 {
289 	struct sctp_cwnd_log sctp_clog;
290 
291 	sctp_clog.x.cwnd.net = net;
292 	if (stcb->asoc.send_queue_cnt > 255)
293 		sctp_clog.x.cwnd.cnt_in_send = 255;
294 	else
295 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
296 	if (stcb->asoc.stream_queue_cnt > 255)
297 		sctp_clog.x.cwnd.cnt_in_str = 255;
298 	else
299 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
300 
301 	if (net) {
302 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
303 		sctp_clog.x.cwnd.inflight = net->flight_size;
304 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
305 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
306 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
307 	}
308 	if (SCTP_CWNDLOG_PRESEND == from) {
309 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
310 	}
311 	sctp_clog.x.cwnd.cwnd_augment = augment;
312 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
313 	    SCTP_LOG_EVENT_CWND,
314 	    from,
315 	    sctp_clog.x.misc.log1,
316 	    sctp_clog.x.misc.log2,
317 	    sctp_clog.x.misc.log3,
318 	    sctp_clog.x.misc.log4);
319 
320 }
321 
322 void
323 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
324 {
325 	struct sctp_cwnd_log sctp_clog;
326 
327 	memset(&sctp_clog, 0, sizeof(sctp_clog));
328 	if (inp) {
329 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
330 
331 	} else {
332 		sctp_clog.x.lock.sock = (void *)NULL;
333 	}
334 	sctp_clog.x.lock.inp = (void *)inp;
335 	if (stcb) {
336 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
337 	} else {
338 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
339 	}
340 	if (inp) {
341 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
342 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
343 	} else {
344 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
345 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
346 	}
347 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
348 	if (inp->sctp_socket) {
349 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
350 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
351 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
352 	} else {
353 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
354 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
355 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
356 	}
357 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
358 	    SCTP_LOG_LOCK_EVENT,
359 	    from,
360 	    sctp_clog.x.misc.log1,
361 	    sctp_clog.x.misc.log2,
362 	    sctp_clog.x.misc.log3,
363 	    sctp_clog.x.misc.log4);
364 
365 }
366 
367 void
368 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
369 {
370 	struct sctp_cwnd_log sctp_clog;
371 
372 	memset(&sctp_clog, 0, sizeof(sctp_clog));
373 	sctp_clog.x.cwnd.net = net;
374 	sctp_clog.x.cwnd.cwnd_new_value = error;
375 	sctp_clog.x.cwnd.inflight = net->flight_size;
376 	sctp_clog.x.cwnd.cwnd_augment = burst;
377 	if (stcb->asoc.send_queue_cnt > 255)
378 		sctp_clog.x.cwnd.cnt_in_send = 255;
379 	else
380 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
381 	if (stcb->asoc.stream_queue_cnt > 255)
382 		sctp_clog.x.cwnd.cnt_in_str = 255;
383 	else
384 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
385 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
386 	    SCTP_LOG_EVENT_MAXBURST,
387 	    from,
388 	    sctp_clog.x.misc.log1,
389 	    sctp_clog.x.misc.log2,
390 	    sctp_clog.x.misc.log3,
391 	    sctp_clog.x.misc.log4);
392 
393 }
394 
395 void
396 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
397 {
398 	struct sctp_cwnd_log sctp_clog;
399 
400 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
401 	sctp_clog.x.rwnd.send_size = snd_size;
402 	sctp_clog.x.rwnd.overhead = overhead;
403 	sctp_clog.x.rwnd.new_rwnd = 0;
404 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
405 	    SCTP_LOG_EVENT_RWND,
406 	    from,
407 	    sctp_clog.x.misc.log1,
408 	    sctp_clog.x.misc.log2,
409 	    sctp_clog.x.misc.log3,
410 	    sctp_clog.x.misc.log4);
411 }
412 
413 void
414 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
415 {
416 	struct sctp_cwnd_log sctp_clog;
417 
418 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
419 	sctp_clog.x.rwnd.send_size = flight_size;
420 	sctp_clog.x.rwnd.overhead = overhead;
421 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
422 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
423 	    SCTP_LOG_EVENT_RWND,
424 	    from,
425 	    sctp_clog.x.misc.log1,
426 	    sctp_clog.x.misc.log2,
427 	    sctp_clog.x.misc.log3,
428 	    sctp_clog.x.misc.log4);
429 }
430 
431 void
432 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
433 {
434 	struct sctp_cwnd_log sctp_clog;
435 
436 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
437 	sctp_clog.x.mbcnt.size_change = book;
438 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
439 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
440 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
441 	    SCTP_LOG_EVENT_MBCNT,
442 	    from,
443 	    sctp_clog.x.misc.log1,
444 	    sctp_clog.x.misc.log2,
445 	    sctp_clog.x.misc.log3,
446 	    sctp_clog.x.misc.log4);
447 
448 }
449 
/*
 * Log four caller-supplied 32-bit values as a generic "misc" trace event;
 * what a-d mean depends entirely on the 'from' call-site code.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
458 
/*
 * Trace a socket-wakeup event: queue/flight counters, the endpoint's
 * wakeup-related PCB flags, and the low byte of the send buffer's
 * sb_flags.  Requires a non-NULL stcb with a valid sctp_ep; the socket
 * itself may be gone (sbflags then logged as 0xff).
 * Note: 'cumtsn' is currently unused by this function.
 */
void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	/* queue counts are clamped to their 8-bit log fields */
	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* encode the deferred-wakeup PCB flags as a 3-bit mask */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* capture the send-buffer flags if the socket still exists */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
	} else {
		/* 0xff marks "no socket" in the trace */
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
505 
506 void
507 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
508 {
509 	struct sctp_cwnd_log sctp_clog;
510 
511 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
512 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
513 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
514 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
515 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
516 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
517 	sctp_clog.x.blk.sndlen = sendlen;
518 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
519 	    SCTP_LOG_EVENT_BLOCK,
520 	    from,
521 	    sctp_clog.x.misc.log1,
522 	    sctp_clog.x.misc.log2,
523 	    sctp_clog.x.misc.log3,
524 	    sctp_clog.x.misc.log4);
525 
526 }
527 
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/*
	 * Stub: the stat log is retrieved via ktrdump rather than this
	 * sockopt path, so nothing is copied out.  Always succeeds.
	 */
	(void)optval;
	(void)optsize;
	return (0);
}
534 
535 #ifdef SCTP_AUDITING_ENABLED
536 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
537 static int sctp_audit_indx = 0;
538 
539 static
540 void
541 sctp_print_audit_report(void)
542 {
543 	int i;
544 	int cnt;
545 
546 	cnt = 0;
547 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
548 		if ((sctp_audit_data[i][0] == 0xe0) &&
549 		    (sctp_audit_data[i][1] == 0x01)) {
550 			cnt = 0;
551 			SCTP_PRINTF("\n");
552 		} else if (sctp_audit_data[i][0] == 0xf0) {
553 			cnt = 0;
554 			SCTP_PRINTF("\n");
555 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
556 		    (sctp_audit_data[i][1] == 0x01)) {
557 			SCTP_PRINTF("\n");
558 			cnt = 0;
559 		}
560 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
561 		    (uint32_t) sctp_audit_data[i][1]);
562 		cnt++;
563 		if ((cnt % 14) == 0)
564 			SCTP_PRINTF("\n");
565 	}
566 	for (i = 0; i < sctp_audit_indx; i++) {
567 		if ((sctp_audit_data[i][0] == 0xe0) &&
568 		    (sctp_audit_data[i][1] == 0x01)) {
569 			cnt = 0;
570 			SCTP_PRINTF("\n");
571 		} else if (sctp_audit_data[i][0] == 0xf0) {
572 			cnt = 0;
573 			SCTP_PRINTF("\n");
574 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
575 		    (sctp_audit_data[i][1] == 0x01)) {
576 			SCTP_PRINTF("\n");
577 			cnt = 0;
578 		}
579 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
580 		    (uint32_t) sctp_audit_data[i][1]);
581 		cnt++;
582 		if ((cnt % 14) == 0)
583 			SCTP_PRINTF("\n");
584 	}
585 	SCTP_PRINTF("\n");
586 }
587 
/*
 * Cross-check (and repair) the association's flight-size accounting.
 * Records a trail of marker bytes in the circular audit buffer, compares
 * the sent-queue contents against the cached retransmit count, total
 * flight bytes and flight chunk count, and the per-net flight sizes.
 * Any mismatch is printed, corrected in place, and triggers a full audit
 * report dump.  'from' tags the call site; inp/stcb may be NULL (logged
 * as 0xAF error markers and the audit is skipped).  'net' is currently
 * unused.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA <from> marks entry into the auditor */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF 0x01: called without an endpoint */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF 0x02: called without an association */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1 <retran low byte>: snapshot of the cached retransmit count */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* recount marked-for-resend chunks and in-flight bytes/chunks */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF 0xA1: retransmit count mismatch; fix the cache */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		/* 0xA2 <corrected count>: record the repaired value */
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF 0xA2: total flight bytes mismatch; repair */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF 0xA5: flight chunk count mismatch; repair */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* now verify that the per-net flight sizes sum to the asoc total */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF 0xA3: per-net sum disagrees with the total */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action: recompute each net's flight from
		 * the sent queue and overwrite any stale value */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		/* something was wrong: dump the full audit trail */
		sctp_print_audit_report();
	}
}
717 
718 void
719 sctp_audit_log(uint8_t ev, uint8_t fd)
720 {
721 
722 	sctp_audit_data[sctp_audit_indx][0] = ev;
723 	sctp_audit_data[sctp_audit_indx][1] = fd;
724 	sctp_audit_indx++;
725 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
726 		sctp_audit_indx = 0;
727 	}
728 }
729 
730 #endif
731 
732 /*
733  * a list of sizes based on typical mtu's, used only if next hop size not
734  * returned.
735  */
/*
 * Ascending table of common link MTUs.  NUMBER_OF_MTU_SIZES (18) must
 * equal the number of entries here -- keep the two in sync.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
756 
757 void
758 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
759 {
760 	struct sctp_association *asoc;
761 	struct sctp_nets *net;
762 
763 	asoc = &stcb->asoc;
764 
765 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
766 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
767 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
768 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
769 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
770 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
771 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
772 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
773 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
774 	}
775 }
776 
777 int
778 find_next_best_mtu(int totsz)
779 {
780 	int i, perfer;
781 
782 	/*
783 	 * if we are in here we must find the next best fit based on the
784 	 * size of the dg that failed to be sent.
785 	 */
786 	perfer = 0;
787 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
788 		if (totsz < sctp_mtu_sizes[i]) {
789 			perfer = i - 1;
790 			if (perfer < 0)
791 				perfer = 0;
792 			break;
793 		}
794 	}
795 	return (sctp_mtu_sizes[perfer]);
796 }
797 
/*
 * Refill the endpoint's pool of random bytes by HMAC-ing the endpoint's
 * secret random_numbers with an incrementing counter, and reset the
 * read position (store_at) to the start of the pool.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
816 
817 uint32_t
818 sctp_select_initial_TSN(struct sctp_pcb *inp)
819 {
820 	/*
821 	 * A true implementation should use random selection process to get
822 	 * the initial stream sequence number, using RFC1750 as a good
823 	 * guideline
824 	 */
825 	uint32_t x, *xp;
826 	uint8_t *p;
827 	int store_at, new_store;
828 
829 	if (inp->initial_sequence_debug != 0) {
830 		uint32_t ret;
831 
832 		ret = inp->initial_sequence_debug;
833 		inp->initial_sequence_debug++;
834 		return (ret);
835 	}
836 retry:
837 	store_at = inp->store_at;
838 	new_store = store_at + sizeof(uint32_t);
839 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
840 		new_store = 0;
841 	}
842 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
843 		goto retry;
844 	}
845 	if (new_store == 0) {
846 		/* Refill the random store */
847 		sctp_fill_random_store(inp);
848 	}
849 	p = &inp->random_store[store_at];
850 	xp = (uint32_t *) p;
851 	x = *xp;
852 	return (x);
853 }
854 
855 uint32_t
856 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
857 {
858 	uint32_t x, not_done;
859 	struct timeval now;
860 
861 	(void)SCTP_GETTIME_TIMEVAL(&now);
862 	not_done = 1;
863 	while (not_done) {
864 		x = sctp_select_initial_TSN(&inp->sctp_ep);
865 		if (x == 0) {
866 			/* we never use 0 */
867 			continue;
868 		}
869 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
870 			not_done = 0;
871 		}
872 	}
873 	return (x);
874 }
875 
876 int
877 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
878     uint32_t override_tag, uint32_t vrf_id)
879 {
880 	struct sctp_association *asoc;
881 
882 	/*
883 	 * Anything set to zero is taken care of by the allocation routine's
884 	 * bzero
885 	 */
886 
887 	/*
888 	 * Up front select what scoping to apply on addresses I tell my peer
889 	 * Not sure what to do with these right now, we will need to come up
890 	 * with a way to set them. We may need to pass them through from the
891 	 * caller in the sctp_aloc_assoc() function.
892 	 */
893 	int i;
894 
895 	asoc = &stcb->asoc;
896 	/* init all variables to a known value. */
897 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
898 	asoc->max_burst = m->sctp_ep.max_burst;
899 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
900 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
901 	asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
902 	/* EY Init nr_sack variable */
903 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
904 	/* JRS 5/21/07 - Init CMT PF variables */
905 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
906 	asoc->sctp_frag_point = m->sctp_frag_point;
907 #ifdef INET
908 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
909 #else
910 	asoc->default_tos = 0;
911 #endif
912 
913 #ifdef INET6
914 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
915 #else
916 	asoc->default_flowlabel = 0;
917 #endif
918 	asoc->sb_send_resv = 0;
919 	if (override_tag) {
920 		asoc->my_vtag = override_tag;
921 	} else {
922 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
923 	}
924 	/* Get the nonce tags */
925 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
926 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
927 	asoc->vrf_id = vrf_id;
928 
929 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
930 		asoc->hb_is_disabled = 1;
931 	else
932 		asoc->hb_is_disabled = 0;
933 
934 #ifdef SCTP_ASOCLOG_OF_TSNS
935 	asoc->tsn_in_at = 0;
936 	asoc->tsn_out_at = 0;
937 	asoc->tsn_in_wrapped = 0;
938 	asoc->tsn_out_wrapped = 0;
939 	asoc->cumack_log_at = 0;
940 	asoc->cumack_log_atsnt = 0;
941 #endif
942 #ifdef SCTP_FS_SPEC_LOG
943 	asoc->fs_index = 0;
944 #endif
945 	asoc->refcnt = 0;
946 	asoc->assoc_up_sent = 0;
947 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
948 	    sctp_select_initial_TSN(&m->sctp_ep);
949 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
950 	/* we are optimisitic here */
951 	asoc->peer_supports_pktdrop = 1;
952 	asoc->peer_supports_nat = 0;
953 	asoc->sent_queue_retran_cnt = 0;
954 
955 	/* for CMT */
956 	asoc->last_net_cmt_send_started = NULL;
957 
958 	/* This will need to be adjusted */
959 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
960 	asoc->last_acked_seq = asoc->init_seq_number - 1;
961 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
962 	asoc->asconf_seq_in = asoc->last_acked_seq;
963 
964 	/* here we are different, we hold the next one we expect */
965 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
966 
967 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
968 	asoc->initial_rto = m->sctp_ep.initial_rto;
969 
970 	asoc->max_init_times = m->sctp_ep.max_init_times;
971 	asoc->max_send_times = m->sctp_ep.max_send_times;
972 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
973 	asoc->free_chunk_cnt = 0;
974 
975 	asoc->iam_blocking = 0;
976 	/* ECN Nonce initialization */
977 	asoc->context = m->sctp_context;
978 	asoc->def_send = m->def_send;
979 	asoc->ecn_nonce_allowed = 0;
980 	asoc->receiver_nonce_sum = 1;
981 	asoc->nonce_sum_expect_base = 1;
982 	asoc->nonce_sum_check = 1;
983 	asoc->nonce_resync_tsn = 0;
984 	asoc->nonce_wait_for_ecne = 0;
985 	asoc->nonce_wait_tsn = 0;
986 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
987 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
988 	asoc->pr_sctp_cnt = 0;
989 	asoc->total_output_queue_size = 0;
990 
991 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
992 		struct in6pcb *inp6;
993 
994 		/* Its a V6 socket */
995 		inp6 = (struct in6pcb *)m;
996 		asoc->ipv6_addr_legal = 1;
997 		/* Now look at the binding flag to see if V4 will be legal */
998 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
999 			asoc->ipv4_addr_legal = 1;
1000 		} else {
1001 			/* V4 addresses are NOT legal on the association */
1002 			asoc->ipv4_addr_legal = 0;
1003 		}
1004 	} else {
1005 		/* Its a V4 socket, no - V6 */
1006 		asoc->ipv4_addr_legal = 1;
1007 		asoc->ipv6_addr_legal = 0;
1008 	}
1009 
1010 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1011 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1012 
1013 	asoc->smallest_mtu = m->sctp_frag_point;
1014 #ifdef SCTP_PRINT_FOR_B_AND_M
1015 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1016 	    asoc->smallest_mtu);
1017 #endif
1018 	asoc->minrto = m->sctp_ep.sctp_minrto;
1019 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1020 
1021 	asoc->locked_on_sending = NULL;
1022 	asoc->stream_locked_on = 0;
1023 	asoc->ecn_echo_cnt_onq = 0;
1024 	asoc->stream_locked = 0;
1025 
1026 	asoc->send_sack = 1;
1027 
1028 	LIST_INIT(&asoc->sctp_restricted_addrs);
1029 
1030 	TAILQ_INIT(&asoc->nets);
1031 	TAILQ_INIT(&asoc->pending_reply_queue);
1032 	TAILQ_INIT(&asoc->asconf_ack_sent);
1033 	/* Setup to fill the hb random cache at first HB */
1034 	asoc->hb_random_idx = 4;
1035 
1036 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1037 
1038 	/*
1039 	 * JRS - Pick the default congestion control module based on the
1040 	 * sysctl.
1041 	 */
1042 	switch (m->sctp_ep.sctp_default_cc_module) {
1043 		/* JRS - Standard TCP congestion control */
1044 	case SCTP_CC_RFC2581:
1045 		{
1046 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1047 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1048 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1049 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1050 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1051 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1052 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1053 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1054 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1055 			break;
1056 		}
1057 		/* JRS - High Speed TCP congestion control (Floyd) */
1058 	case SCTP_CC_HSTCP:
1059 		{
1060 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1061 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1063 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1064 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1065 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1066 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1067 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1068 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1069 			break;
1070 		}
1071 		/* JRS - HTCP congestion control */
1072 	case SCTP_CC_HTCP:
1073 		{
1074 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1075 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1077 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1078 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1079 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1080 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1081 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1082 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1083 			break;
1084 		}
1085 		/* JRS - By default, use RFC2581 */
1086 	default:
1087 		{
1088 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1089 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1091 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1092 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1093 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1094 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1095 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1096 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1097 			break;
1098 		}
1099 	}
1100 
1101 	/*
1102 	 * Now the stream parameters, here we allocate space for all streams
1103 	 * that we request by default.
1104 	 */
1105 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1106 	    m->sctp_ep.pre_open_stream_count;
1107 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1108 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1109 	    SCTP_M_STRMO);
1110 	if (asoc->strmout == NULL) {
1111 		/* big trouble no memory */
1112 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1113 		return (ENOMEM);
1114 	}
1115 	for (i = 0; i < asoc->streamoutcnt; i++) {
1116 		/*
1117 		 * inbound side must be set to 0xffff, also NOTE when we get
1118 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1119 		 * count (streamoutcnt) but first check if we sent to any of
1120 		 * the upper streams that were dropped (if some were). Those
1121 		 * that were dropped must be notified to the upper layer as
1122 		 * failed to send.
1123 		 */
1124 		asoc->strmout[i].next_sequence_sent = 0x0;
1125 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1126 		asoc->strmout[i].stream_no = i;
1127 		asoc->strmout[i].last_msg_incomplete = 0;
1128 		asoc->strmout[i].next_spoke.tqe_next = 0;
1129 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1130 	}
1131 	/* Now the mapping array */
1132 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1133 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1134 	    SCTP_M_MAP);
1135 	if (asoc->mapping_array == NULL) {
1136 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1137 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1138 		return (ENOMEM);
1139 	}
1140 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1141 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1142 	    SCTP_M_MAP);
1143 	if (asoc->nr_mapping_array == NULL) {
1144 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1145 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1146 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1147 		return (ENOMEM);
1148 	}
1149 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1150 
1151 	/* Now the init of the other outqueues */
1152 	TAILQ_INIT(&asoc->free_chunks);
1153 	TAILQ_INIT(&asoc->out_wheel);
1154 	TAILQ_INIT(&asoc->control_send_queue);
1155 	TAILQ_INIT(&asoc->asconf_send_queue);
1156 	TAILQ_INIT(&asoc->send_queue);
1157 	TAILQ_INIT(&asoc->sent_queue);
1158 	TAILQ_INIT(&asoc->reasmqueue);
1159 	TAILQ_INIT(&asoc->resetHead);
1160 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1161 	TAILQ_INIT(&asoc->asconf_queue);
1162 	/* authentication fields */
1163 	asoc->authinfo.random = NULL;
1164 	asoc->authinfo.active_keyid = 0;
1165 	asoc->authinfo.assoc_key = NULL;
1166 	asoc->authinfo.assoc_keyid = 0;
1167 	asoc->authinfo.recv_key = NULL;
1168 	asoc->authinfo.recv_keyid = 0;
1169 	LIST_INIT(&asoc->shared_keys);
1170 	asoc->marked_retrans = 0;
1171 	asoc->timoinit = 0;
1172 	asoc->timodata = 0;
1173 	asoc->timosack = 0;
1174 	asoc->timoshutdown = 0;
1175 	asoc->timoheartbeat = 0;
1176 	asoc->timocookie = 0;
1177 	asoc->timoshutdownack = 0;
1178 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1179 	asoc->discontinuity_time = asoc->start_time;
1180 	/*
1181 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1182 	 * freed later when the association is freed.
1183 	 */
1184 	return (0);
1185 }
1186 
1187 void
1188 sctp_print_mapping_array(struct sctp_association *asoc)
1189 {
1190 	unsigned int i, limit;
1191 
1192 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1193 	    asoc->mapping_array_size,
1194 	    asoc->mapping_array_base_tsn,
1195 	    asoc->cumulative_tsn,
1196 	    asoc->highest_tsn_inside_map,
1197 	    asoc->highest_tsn_inside_nr_map);
1198 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1199 		if (asoc->mapping_array[limit - 1]) {
1200 			break;
1201 		}
1202 	}
1203 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1204 	for (i = 0; i < limit; i++) {
1205 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1206 		if (((i + 1) % 16) == 0)
1207 			printf("\n");
1208 	}
1209 	if (limit % 16)
1210 		printf("\n");
1211 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1212 		if (asoc->nr_mapping_array[limit - 1]) {
1213 			break;
1214 		}
1215 	}
1216 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1217 	for (i = 0; i < limit; i++) {
1218 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1219 	}
1220 	if (limit % 16)
1221 		printf("\n");
1222 }
1223 
1224 int
1225 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1226 {
1227 	/* mapping array needs to grow */
1228 	uint8_t *new_array1, *new_array2;
1229 	uint32_t new_size;
1230 
1231 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1232 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1233 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1234 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1235 		/* can't get more, forget it */
1236 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1237 		if (new_array1) {
1238 			SCTP_FREE(new_array1, SCTP_M_MAP);
1239 		}
1240 		if (new_array2) {
1241 			SCTP_FREE(new_array2, SCTP_M_MAP);
1242 		}
1243 		return (-1);
1244 	}
1245 	memset(new_array1, 0, new_size);
1246 	memset(new_array2, 0, new_size);
1247 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1248 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1249 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1250 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1251 	asoc->mapping_array = new_array1;
1252 	asoc->nr_mapping_array = new_array2;
1253 	asoc->mapping_array_size = new_size;
1254 	return (0);
1255 }
1256 
1257 
1258 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Core of the thread-based iterator: walk endpoints (and optionally the
 * associations within each) invoking the caller-supplied callbacks stored
 * in *it.  Consumes the iterator: when the walk finishes, function_atend
 * (if set) is called and the iterator structure itself is freed.
 *
 * Locking: runs under SCTP_ITERATOR_LOCK for the whole walk, taking the
 * per-inp and per-tcb locks as it descends.  Note the label
 * done_with_iterator sits inside the first if-body; several later paths
 * jump back into it — the control flow is deliberate, do not "clean up"
 * the gotos without re-deriving the lock state on every path.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	/* Drop the reference taken when the iterator was scheduled. */
	if (it->inp) {
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		/*
		 * Common exit: unlock, fire the completion callback, and
		 * free the iterator.  'it' must not be touched afterwards.
		 */
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/* Skip endpoints whose pcb flags/features don't match the filter. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			/* Single-inp mode: a mismatch means we are done. */
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/* Downgrade: we only need read access while visiting associations. */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		/* Per-endpoint callback runs once per endpoint. */
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint skipped or has no associations: finish it. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Take refs so neither the tcb nor the inp can be
			 * freed while we briefly release the iterator lock,
			 * then reacquire everything in the original order.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	/*
	 * NOTE(review): this lock/unlock pair looks like a synchronization
	 * barrier against writers of the inp — confirm intent before
	 * removing it.
	 */
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1385 
1386 void
1387 sctp_iterator_worker(void)
1388 {
1389 	struct sctp_iterator *it = NULL;
1390 
1391 	/* This function is called with the WQ lock in place */
1392 
1393 	SCTP_BASE_INFO(iterator_running) = 1;
1394 again:
1395 	it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1396 	while (it) {
1397 		/* now lets work on this one */
1398 		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
1399 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1400 		sctp_iterator_work(it);
1401 		SCTP_IPI_ITERATOR_WQ_LOCK();
1402 		/* sa_ignore FREED_MEMORY */
1403 		it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1404 	}
1405 	if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
1406 		goto again;
1407 	}
1408 	SCTP_BASE_INFO(iterator_running) = 0;
1409 	return;
1410 }
1411 
1412 #endif
1413 
1414 
1415 static void
1416 sctp_handle_addr_wq(void)
1417 {
1418 	/* deal with the ADDR wq from the rtsock calls */
1419 	struct sctp_laddr *wi;
1420 	struct sctp_asconf_iterator *asc;
1421 
1422 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1423 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1424 	if (asc == NULL) {
1425 		/* Try later, no memory */
1426 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1427 		    (struct sctp_inpcb *)NULL,
1428 		    (struct sctp_tcb *)NULL,
1429 		    (struct sctp_nets *)NULL);
1430 		return;
1431 	}
1432 	LIST_INIT(&asc->list_of_work);
1433 	asc->cnt = 0;
1434 	SCTP_IPI_ITERATOR_WQ_LOCK();
1435 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1436 	while (wi != NULL) {
1437 		LIST_REMOVE(wi, sctp_nxt_addr);
1438 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1439 		asc->cnt++;
1440 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1441 	}
1442 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1443 	if (asc->cnt == 0) {
1444 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1445 	} else {
1446 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1447 		    sctp_asconf_iterator_stcb,
1448 		    NULL,	/* No ep end for boundall */
1449 		    SCTP_PCB_FLAGS_BOUNDALL,
1450 		    SCTP_PCB_ANY_FEATURES,
1451 		    SCTP_ASOC_ANY_STATE,
1452 		    (void *)asc, 0,
1453 		    sctp_asconf_iterator_end, NULL, 0);
1454 	}
1455 }
1456 
/*
 * Scratch values used by the T3 retransmit branch of
 * sctp_timeout_handler() below.
 *
 * NOTE(review): these are file-scope globals (and not even static), yet
 * they are written per-invocation like locals — concurrent timer handlers
 * on different CPUs can clobber each other's values.  They look like they
 * should be locals inside sctp_timeout_handler(); confirm that nothing
 * outside this file references them before changing the linkage.
 */
int retcode = 0;
int cur_oerr = 0;
1459 
/*
 * Central callout handler for every SCTP timer type.  't' is really a
 * struct sctp_timer * whose ep/tcb/net fields identify the endpoint,
 * association and destination the timer was armed for (any of which may
 * be NULL depending on the timer type).
 *
 * The long prologue validates the timer and takes references/locks in a
 * fixed order (inp ref -> tcb ref -> tcb lock), bailing out early if the
 * timer is stale, the endpoint socket is gone, or the association is
 * being freed.  tmr->stopped_from is updated at each stage purely as a
 * debugging breadcrumb showing how far servicing progressed.  The switch
 * then dispatches to the per-type handler; most handlers that can free
 * the tcb return non-zero, in which case we must NOT unlock it again and
 * jump straight to out_decr.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;
	struct sctp_iterator *it = NULL;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	/* A timer that doesn't point back at itself has been recycled. */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Only the ADDR_WQ timer is allowed to run without an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
		/* For iterator timers, 'ep' actually carries the iterator. */
		it = (struct sctp_iterator *)inp;
		inp = NULL;
	}
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * If the socket has already been detached, only the timer
		 * types listed below are still meaningful; everything else
		 * is dropped here.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the tcb before inspecting its state. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			/* Association has no state: it is gone; back out. */
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while we were getting here. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* Lock the tcb, then trade the temporary ref for the lock. */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		SCTP_STAT_INCR(sctps_timoiterator);
		sctp_iterator_timer(it);
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		/*
		 * NOTE(review): cur_oerr and retcode are file-scope globals
		 * used like locals here — see the declarations above this
		 * function.
		 */
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* Count reachable-but-unconfirmed destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			/*
			 * NOTE(review): after the TAILQ_FOREACH above, lnet
			 * is NULL, yet it is passed to sctp_heartbeat_timer/
			 * sctp_timer_start below — presumably those handle a
			 * NULL net; confirm against their implementations.
			 */
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* Guard expired: the shutdown never completed; abort. */
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: take a ref so the tcb survives while
		 * we drop its lock to acquire the socket lock first.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_DIRECTLY_NOCMPSET);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	/* Drop the endpoint reference taken in the prologue. */
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1934 
/*
 * Start (arm) the timer of type t_type for the given endpoint (inp),
 * association (stcb) and/or destination (net).  Each timer type selects
 * its own struct sctp_timer and computes its own timeout: most
 * retransmission-style timers use the destination's RTO, falling back to
 * the association's initial RTO while net->RTO is still 0 (no RTT
 * measurement yet).  If the chosen timer is already pending it is left
 * running unchanged.  The TCB lock must be held whenever an stcb is
 * passed in.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int to_ticks;
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may be started without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	to_ticks = 0;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			/*
			 * For the iterator timer, 'inp' actually carries a
			 * struct sctp_iterator pointer (see the cast below).
			 */
			struct sctp_iterator *it;

			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
			to_ticks = SCTP_ITERATOR_TICKS;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				/* No RTT measured yet; use the initial RTO. */
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp
		 * ususually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint8_t this_random;
			int cnt_of_unconf = 0;
			struct sctp_nets *lnet;

			/* Count reachable-but-unconfirmed destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf) {
				/*
				 * NOTE(review): net is deliberately cleared
				 * here so the 'else' timeout branch below is
				 * taken; lnet is NULL when passed to
				 * sctp_heartbeat_timer() — presumably that
				 * routine selects an unconfirmed destination
				 * itself.  Confirm against its definition.
				 */
				net = lnet = NULL;
				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
			}
			/* Refill the random-jitter pool every 4 uses. */
			if (stcb->asoc.hb_random_idx > 3) {
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				memcpy(stcb->asoc.hb_random_values, &rndval,
				    sizeof(stcb->asoc.hb_random_values));
				stcb->asoc.hb_random_idx = 0;
			}
			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
			stcb->asoc.hb_random_idx++;
			stcb->asoc.hb_ect_randombit = 0;
			/*
			 * this_random will be 0 - 256 ms RTO is in ms.
			 */
			if ((stcb->asoc.hb_is_disabled) &&
			    (cnt_of_unconf == 0)) {
				/* HB disabled and nothing to confirm: don't arm. */
				return;
			}
			if (net) {
				int delay;

				delay = stcb->asoc.heart_beat_delay;
				/* Drop the delay if any destination still needs confirming. */
				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
						delay = 0;
					}
				}
				if (net->RTO == 0) {
					/* Never been checked */
					to_ticks = this_random + stcb->asoc.initial_rto + delay;
				} else {
					/* set rto_val to the ms */
					to_ticks = delay + net->RTO + this_random;
				}
			} else {
				if (cnt_of_unconf) {
					to_ticks = this_random + stcb->asoc.initial_rto;
				} else {
					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
				}
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &stcb->asoc.hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was compelete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here ususually about 60
		 * minutes.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		/* Re-uses the stream-reset timer slot for the asoc kill. */
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU ususually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (inp == NULL)) {
			return;
		}
		if (net == NULL) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_EARLYFR:
		{
			unsigned int msec;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			if (net->flight_size > net->cwnd) {
				/* no need to start */
				return;
			}
			SCTP_STAT_INCR(sctps_earlyfrstart);
			if (net->lastsa == 0) {
				/* Hmm no rtt estimate yet? */
				msec = stcb->asoc.initial_rto >> 2;
			} else {
				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
			}
			/* Clamp to the configured minimum (and absolute floor). */
			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
				if (msec < SCTP_MINFR_MSEC_FLOOR) {
					msec = SCTP_MINFR_MSEC_FLOOR;
				}
			}
			to_ticks = MSEC_TO_TICKS(msec);
			tmr = &net->fr_timer;
		}
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		/* Note: this type requires net to be NULL. */
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		return;
		/* NOTREACHED */
		break;
	};
	/* Every case must have produced both a timeout and a timer slot. */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __FUNCTION__, t_type, to_ticks, tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	/* Stash the context the timeout handler will need when it fires. */
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2298 
/*
 * Stop the timer of type t_type for the given endpoint/association/path.
 * Selects the same struct sctp_timer slot that sctp_timer_start() uses
 * for that type; if the slot is currently armed for a DIFFERENT type
 * (several types share a slot) the stop request is silently ignored so
 * we do not kill someone else's timer.  'from' records the stopping
 * location for diagnostics.  The TCB lock must be held when an stcb is
 * supplied.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may be stopped without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			/* As in sctp_timer_start(), 'inp' carries the iterator. */
			struct sctp_iterator *it;

			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		/* Shares the stream-reset timer slot (see sctp_timer_start). */
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* Keep the outstanding-send-timer count from going negative. */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/* Clearing self marks the timer as intentionally stopped. */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2470 
2471 uint32_t
2472 sctp_calculate_len(struct mbuf *m)
2473 {
2474 	uint32_t tlen = 0;
2475 	struct mbuf *at;
2476 
2477 	at = m;
2478 	while (at) {
2479 		tlen += SCTP_BUF_LEN(at);
2480 		at = SCTP_BUF_NEXT(at);
2481 	}
2482 	return (tlen);
2483 }
2484 
2485 void
2486 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2487     struct sctp_association *asoc, uint32_t mtu)
2488 {
2489 	/*
2490 	 * Reset the P-MTU size on this association, this involves changing
2491 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2492 	 * allow the DF flag to be cleared.
2493 	 */
2494 	struct sctp_tmit_chunk *chk;
2495 	unsigned int eff_mtu, ovh;
2496 
2497 #ifdef SCTP_PRINT_FOR_B_AND_M
2498 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2499 	    inp, asoc, mtu);
2500 #endif
2501 	asoc->smallest_mtu = mtu;
2502 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2503 		ovh = SCTP_MIN_OVERHEAD;
2504 	} else {
2505 		ovh = SCTP_MIN_V4_OVERHEAD;
2506 	}
2507 	eff_mtu = mtu - ovh;
2508 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2509 
2510 		if (chk->send_size > eff_mtu) {
2511 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2512 		}
2513 	}
2514 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2515 		if (chk->send_size > eff_mtu) {
2516 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2517 		}
2518 	}
2519 }
2520 
2521 
2522 /*
2523  * given an association and starting time of the current RTT period return
2524  * RTO in number of msecs net should point to the current network
2525  */
2526 uint32_t
2527 sctp_calculate_rto(struct sctp_tcb *stcb,
2528     struct sctp_association *asoc,
2529     struct sctp_nets *net,
2530     struct timeval *told,
2531     int safe)
2532 {
2533 	/*-
2534 	 * given an association and the starting time of the current RTT
2535 	 * period (in value1/value2) return RTO in number of msecs.
2536 	 */
2537 	int calc_time = 0;
2538 	int o_calctime;
2539 	uint32_t new_rto = 0;
2540 	int first_measure = 0;
2541 	struct timeval now, then, *old;
2542 
2543 	/* Copy it out for sparc64 */
2544 	if (safe == sctp_align_unsafe_makecopy) {
2545 		old = &then;
2546 		memcpy(&then, told, sizeof(struct timeval));
2547 	} else if (safe == sctp_align_safe_nocopy) {
2548 		old = told;
2549 	} else {
2550 		/* error */
2551 		SCTP_PRINTF("Huh, bad rto calc call\n");
2552 		return (0);
2553 	}
2554 	/************************/
2555 	/* 1. calculate new RTT */
2556 	/************************/
2557 	/* get the current time */
2558 	(void)SCTP_GETTIME_TIMEVAL(&now);
2559 	/* compute the RTT value */
2560 	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
2561 		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
2562 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2563 			calc_time += (((u_long)now.tv_usec -
2564 			    (u_long)old->tv_usec) / 1000);
2565 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2566 			/* Borrow 1,000ms from current calculation */
2567 			calc_time -= 1000;
2568 			/* Add in the slop over */
2569 			calc_time += ((int)now.tv_usec / 1000);
2570 			/* Add in the pre-second ms's */
2571 			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
2572 		}
2573 	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
2574 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2575 			calc_time = ((u_long)now.tv_usec -
2576 			    (u_long)old->tv_usec) / 1000;
2577 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2578 			/* impossible .. garbage in nothing out */
2579 			goto calc_rto;
2580 		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
2581 			/*
2582 			 * We have to have 1 usec :-D this must be the
2583 			 * loopback.
2584 			 */
2585 			calc_time = 1;
2586 		} else {
2587 			/* impossible .. garbage in nothing out */
2588 			goto calc_rto;
2589 		}
2590 	} else {
2591 		/* Clock wrapped? */
2592 		goto calc_rto;
2593 	}
2594 	/***************************/
2595 	/* 2. update RTTVAR & SRTT */
2596 	/***************************/
2597 	net->rtt = o_calctime = calc_time;
2598 	/* this is Van Jacobson's integer version */
2599 	if (net->RTO_measured) {
2600 		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
2601 								 * shift=3 */
2602 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2603 			rto_logging(net, SCTP_LOG_RTTVAR);
2604 		}
2605 		net->prev_rtt = o_calctime;
2606 		net->lastsa += calc_time;	/* add 7/8th into sa when
2607 						 * shift=3 */
2608 		if (calc_time < 0) {
2609 			calc_time = -calc_time;
2610 		}
2611 		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
2612 									 * VAR shift=2 */
2613 		net->lastsv += calc_time;
2614 		if (net->lastsv == 0) {
2615 			net->lastsv = SCTP_CLOCK_GRANULARITY;
2616 		}
2617 	} else {
2618 		/* First RTO measurment */
2619 		net->RTO_measured = 1;
2620 		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
2621 								 * shift=3 */
2622 		net->lastsv = calc_time;
2623 		if (net->lastsv == 0) {
2624 			net->lastsv = SCTP_CLOCK_GRANULARITY;
2625 		}
2626 		first_measure = 1;
2627 		net->prev_rtt = o_calctime;
2628 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2629 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2630 		}
2631 	}
2632 calc_rto:
2633 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2634 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2635 	    (stcb->asoc.sat_network_lockout == 0)) {
2636 		stcb->asoc.sat_network = 1;
2637 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2638 		stcb->asoc.sat_network = 0;
2639 		stcb->asoc.sat_network_lockout = 1;
2640 	}
2641 	/* bound it, per C6/C7 in Section 5.3.1 */
2642 	if (new_rto < stcb->asoc.minrto) {
2643 		new_rto = stcb->asoc.minrto;
2644 	}
2645 	if (new_rto > stcb->asoc.maxrto) {
2646 		new_rto = stcb->asoc.maxrto;
2647 	}
2648 	/* we are now returning the RTO */
2649 	return (new_rto);
2650 }
2651 
2652 /*
2653  * return a pointer to a contiguous piece of data from the given mbuf chain
2654  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2655  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2656  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2657  */
2658 caddr_t
2659 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2660 {
2661 	uint32_t count;
2662 	uint8_t *ptr;
2663 
2664 	ptr = in_ptr;
2665 	if ((off < 0) || (len <= 0))
2666 		return (NULL);
2667 
2668 	/* find the desired start location */
2669 	while ((m != NULL) && (off > 0)) {
2670 		if (off < SCTP_BUF_LEN(m))
2671 			break;
2672 		off -= SCTP_BUF_LEN(m);
2673 		m = SCTP_BUF_NEXT(m);
2674 	}
2675 	if (m == NULL)
2676 		return (NULL);
2677 
2678 	/* is the current mbuf large enough (eg. contiguous)? */
2679 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2680 		return (mtod(m, caddr_t)+off);
2681 	} else {
2682 		/* else, it spans more than one mbuf, so save a temp copy... */
2683 		while ((m != NULL) && (len > 0)) {
2684 			count = min(SCTP_BUF_LEN(m) - off, len);
2685 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2686 			len -= count;
2687 			ptr += count;
2688 			off = 0;
2689 			m = SCTP_BUF_NEXT(m);
2690 		}
2691 		if ((m == NULL) && (len > 0))
2692 			return (NULL);
2693 		else
2694 			return ((caddr_t)in_ptr);
2695 	}
2696 }
2697 
2698 
2699 
2700 struct sctp_paramhdr *
2701 sctp_get_next_param(struct mbuf *m,
2702     int offset,
2703     struct sctp_paramhdr *pull,
2704     int pull_limit)
2705 {
2706 	/* This just provides a typed signature to Peter's Pull routine */
2707 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2708 	    (uint8_t *) pull));
2709 }
2710 
2711 
2712 int
2713 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2714 {
2715 	/*
2716 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2717 	 * padlen is > 3 this routine will fail.
2718 	 */
2719 	uint8_t *dp;
2720 	int i;
2721 
2722 	if (padlen > 3) {
2723 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2724 		return (ENOBUFS);
2725 	}
2726 	if (padlen <= M_TRAILINGSPACE(m)) {
2727 		/*
2728 		 * The easy way. We hope the majority of the time we hit
2729 		 * here :)
2730 		 */
2731 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2732 		SCTP_BUF_LEN(m) += padlen;
2733 	} else {
2734 		/* Hard way we must grow the mbuf */
2735 		struct mbuf *tmp;
2736 
2737 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2738 		if (tmp == NULL) {
2739 			/* Out of space GAK! we are in big trouble. */
2740 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2741 			return (ENOSPC);
2742 		}
2743 		/* setup and insert in middle */
2744 		SCTP_BUF_LEN(tmp) = padlen;
2745 		SCTP_BUF_NEXT(tmp) = NULL;
2746 		SCTP_BUF_NEXT(m) = tmp;
2747 		dp = mtod(tmp, uint8_t *);
2748 	}
2749 	/* zero out the pad */
2750 	for (i = 0; i < padlen; i++) {
2751 		*dp = 0;
2752 		dp++;
2753 	}
2754 	return (0);
2755 }
2756 
2757 int
2758 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2759 {
2760 	/* find the last mbuf in chain and pad it */
2761 	struct mbuf *m_at;
2762 
2763 	m_at = m;
2764 	if (last_mbuf) {
2765 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2766 	} else {
2767 		while (m_at) {
2768 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2769 				return (sctp_add_pad_tombuf(m_at, padval));
2770 			}
2771 			m_at = SCTP_BUF_NEXT(m_at);
2772 		}
2773 	}
2774 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2775 	return (EFAULT);
2776 }
2777 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification (state 'event', cause
 * 'error') to the association's socket.  For TCP-model or connected
 * one-to-many sockets, COMM_LOST / CANT_STR_ASSOC additionally set
 * so_error (ECONNREFUSED while still in COOKIE_WAIT, ECONNRESET
 * otherwise) and wake any sleepers.  The notification itself is only
 * queued when SCTP_PCB_FLAGS_RECVASSOCEVNT is enabled.  'so_locked'
 * says whether the caller already holds the socket lock (only
 * meaningful on platforms that need the socket-lock dance below).
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock-order dance: the socket lock must be taken
			 * before the TCB lock, so pin the stcb with a
			 * refcount, drop the TCB lock, take the socket
			 * lock, then re-take the TCB lock.  The socket may
			 * have closed while the TCB lock was dropped.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Build the sctp_assoc_change notification in the mbuf. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Same lock-order dance as above. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2894 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification for peer address 'sa'
 * entering state 'state' (with cause 'error') to the association's
 * socket read queue.  No-op unless SCTP_PCB_FLAGS_RECVPADDREVNT is
 * enabled on the endpoint.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address, normalizing any IPv6 scope representation. */
	switch (sa->sa_family) {
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
2969 
2970 
/*
 * Queue an SCTP_SEND_FAILED notification for a chunk taken off the
 * sent/send queue.  The chunk's data mbuf chain is stolen (chk->data is
 * set to NULL) and chained behind the notification header, with the SCTP
 * DATA chunk header trimmed off so the user sees (roughly) the original
 * payload.  'error' selects SCTP_DATA_UNSENT vs SCTP_DATA_SENT.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * Reported length: notification header plus payload, minus the DATA
	 * chunk header that is trimmed from chk->data below.
	 */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	/* hang the (trimmed) user data off the notification header */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3052 
3053 
/*
 * Queue an SCTP_SEND_FAILED notification for a message still sitting on
 * a stream output queue (never turned into a transmit chunk).  The
 * pending data mbuf chain is stolen (sp->data set to NULL) and chained
 * behind the notification header.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* reported length covers the notification header plus user data */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* part of this message already went out as fragments */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3126 
3127 
3128 
3129 static void
3130 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3131     uint32_t error)
3132 {
3133 	struct mbuf *m_notify;
3134 	struct sctp_adaptation_event *sai;
3135 	struct sctp_queued_to_read *control;
3136 
3137 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3138 		/* event not enabled */
3139 		return;
3140 	}
3141 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3142 	if (m_notify == NULL)
3143 		/* no space left */
3144 		return;
3145 	SCTP_BUF_LEN(m_notify) = 0;
3146 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3147 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3148 	sai->sai_flags = 0;
3149 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3150 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3151 	sai->sai_assoc_id = sctp_get_associd(stcb);
3152 
3153 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3154 	SCTP_BUF_NEXT(m_notify) = NULL;
3155 
3156 	/* append to socket */
3157 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3158 	    0, 0, 0, 0, 0, 0,
3159 	    m_notify);
3160 	if (control == NULL) {
3161 		/* no memory */
3162 		sctp_m_freem(m_notify);
3163 		return;
3164 	}
3165 	control->length = SCTP_BUF_LEN(m_notify);
3166 	control->spec_flags = M_NOTIFICATION;
3167 	/* not that we need this */
3168 	control->tail_mbuf = m_notify;
3169 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3170 	    control,
3171 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3172 }
3173 
3174 /* This always must be called with the read-queue LOCKED in the INP */
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT on the socket.  'val' packs the
 * stream number in its upper 16 bits and the stream sequence in the
 * lower 16.  The entry is linked into the inp read queue by hand (right
 * after the in-progress partial-delivery entry, if any) instead of via
 * sctp_add_to_readq().  Caller must hold the inp read-queue lock (see
 * comment above).
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	/* length is zeroed here and re-accounted via the atomic_add below */
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* charge the notification to the socket-buffer accounting */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Take the socket lock; the TCB lock is dropped and
			 * reacquired to preserve lock order, with a refcount
			 * pinning the stcb across the gap.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while unlocked; bail */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3264 
/*
 * Notify the application that the peer has sent a SHUTDOWN.  For
 * TCP-model (and connected UDP-model) sockets, first mark the socket
 * unable to send; then, if subscribed, queue an SCTP_SHUTDOWN_EVENT.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Acquire the socket lock; drop/retake the TCB lock to
		 * keep lock order, pinning the stcb with a refcount.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3332 
3333 static void
3334 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3335     int so_locked
3336 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3337     SCTP_UNUSED
3338 #endif
3339 )
3340 {
3341 	struct mbuf *m_notify;
3342 	struct sctp_sender_dry_event *event;
3343 	struct sctp_queued_to_read *control;
3344 
3345 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3346 		/* event not enabled */
3347 		return;
3348 	}
3349 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3350 	if (m_notify == NULL) {
3351 		/* no space left */
3352 		return;
3353 	}
3354 	SCTP_BUF_LEN(m_notify) = 0;
3355 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3356 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3357 	event->sender_dry_flags = 0;
3358 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3359 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3360 
3361 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3362 	SCTP_BUF_NEXT(m_notify) = NULL;
3363 
3364 	/* append to socket */
3365 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3366 	    0, 0, 0, 0, 0, 0, m_notify);
3367 	if (control == NULL) {
3368 		/* no memory */
3369 		sctp_m_freem(m_notify);
3370 		return;
3371 	}
3372 	control->length = SCTP_BUF_LEN(m_notify);
3373 	control->spec_flags = M_NOTIFICATION;
3374 	/* not that we need this */
3375 	control->tail_mbuf = m_notify;
3376 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3377 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3378 }
3379 
3380 
/*
 * Queue an SCTP_STREAM_RESET_EVENT reporting an add-streams operation.
 * 'number_entries' is the count of streams added and 'flag' carries the
 * direction / failure bits to OR into strreset_flags.
 */
static void
sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	/*
	 * NOTE(review): only strreset_list[0] (the count) is written even
	 * though 'len' reserves number_entries list slots — presumably the
	 * consumer only reads the count for add-stream events; confirm.
	 */
	strreset->strreset_list[0] = number_entries;

	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3435 
3436 
/*
 * Queue an SCTP_STREAM_RESET_EVENT listing the affected streams.  With
 * number_entries == 0 the event applies to all streams; otherwise the
 * (network-order) stream numbers in 'list' are copied out host-order.
 * 'flag' carries the direction / failure bits to OR into strreset_flags.
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	if (number_entries == 0) {
		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
	} else {
		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
	}
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		/* copy the stream list, converting to host byte order */
		for (i = 0; i < number_entries; i++) {
			strreset->strreset_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3501 
3502 
3503 void
3504 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3505     uint32_t error, void *data, int so_locked
3506 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3507     SCTP_UNUSED
3508 #endif
3509 )
3510 {
3511 	if ((stcb == NULL) ||
3512 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3513 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3514 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3515 		/* If the socket is gone we are out of here */
3516 		return;
3517 	}
3518 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3519 		return;
3520 	}
3521 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3522 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3523 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3524 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3525 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3526 			/* Don't report these in front states */
3527 			return;
3528 		}
3529 	}
3530 	switch (notification) {
3531 	case SCTP_NOTIFY_ASSOC_UP:
3532 		if (stcb->asoc.assoc_up_sent == 0) {
3533 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3534 			stcb->asoc.assoc_up_sent = 1;
3535 		}
3536 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3537 			sctp_notify_adaptation_layer(stcb, error);
3538 		}
3539 		if (stcb->asoc.peer_supports_auth == 0) {
3540 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3541 			    NULL, so_locked);
3542 		}
3543 		break;
3544 	case SCTP_NOTIFY_ASSOC_DOWN:
3545 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3546 		break;
3547 	case SCTP_NOTIFY_INTERFACE_DOWN:
3548 		{
3549 			struct sctp_nets *net;
3550 
3551 			net = (struct sctp_nets *)data;
3552 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3553 			    (struct sockaddr *)&net->ro._l_addr, error);
3554 			break;
3555 		}
3556 	case SCTP_NOTIFY_INTERFACE_UP:
3557 		{
3558 			struct sctp_nets *net;
3559 
3560 			net = (struct sctp_nets *)data;
3561 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3562 			    (struct sockaddr *)&net->ro._l_addr, error);
3563 			break;
3564 		}
3565 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3566 		{
3567 			struct sctp_nets *net;
3568 
3569 			net = (struct sctp_nets *)data;
3570 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3571 			    (struct sockaddr *)&net->ro._l_addr, error);
3572 			break;
3573 		}
3574 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3575 		sctp_notify_send_failed2(stcb, error,
3576 		    (struct sctp_stream_queue_pending *)data, so_locked);
3577 		break;
3578 	case SCTP_NOTIFY_DG_FAIL:
3579 		sctp_notify_send_failed(stcb, error,
3580 		    (struct sctp_tmit_chunk *)data, so_locked);
3581 		break;
3582 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3583 		{
3584 			uint32_t val;
3585 
3586 			val = *((uint32_t *) data);
3587 
3588 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3589 			break;
3590 		}
3591 	case SCTP_NOTIFY_STRDATA_ERR:
3592 		break;
3593 	case SCTP_NOTIFY_ASSOC_ABORTED:
3594 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3595 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3596 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3597 		} else {
3598 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3599 		}
3600 		break;
3601 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3602 		break;
3603 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3604 		break;
3605 	case SCTP_NOTIFY_ASSOC_RESTART:
3606 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3607 		if (stcb->asoc.peer_supports_auth == 0) {
3608 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3609 			    NULL, so_locked);
3610 		}
3611 		break;
3612 	case SCTP_NOTIFY_HB_RESP:
3613 		break;
3614 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3615 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3616 		break;
3617 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3618 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3619 		break;
3620 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3621 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3622 		break;
3623 
3624 	case SCTP_NOTIFY_STR_RESET_SEND:
3625 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3626 		break;
3627 	case SCTP_NOTIFY_STR_RESET_RECV:
3628 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3629 		break;
3630 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3631 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3632 		break;
3633 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3634 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3635 		break;
3636 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3637 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3638 		    error);
3639 		break;
3640 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3641 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3642 		    error);
3643 		break;
3644 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3645 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3646 		    error);
3647 		break;
3648 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3649 		break;
3650 	case SCTP_NOTIFY_ASCONF_FAILED:
3651 		break;
3652 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3653 		sctp_notify_shutdown_event(stcb);
3654 		break;
3655 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3656 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3657 		    (uint16_t) (uintptr_t) data,
3658 		    so_locked);
3659 		break;
3660 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3661 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3662 		    (uint16_t) (uintptr_t) data,
3663 		    so_locked);
3664 		break;
3665 	case SCTP_NOTIFY_NO_PEER_AUTH:
3666 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3667 		    (uint16_t) (uintptr_t) data,
3668 		    so_locked);
3669 		break;
3670 	case SCTP_NOTIFY_SENDER_DRY:
3671 		sctp_notify_sender_dry_event(stcb, so_locked);
3672 		break;
3673 	default:
3674 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3675 		    __FUNCTION__, notification, notification);
3676 		break;
3677 	}			/* end switch */
3678 }
3679 
3680 void
3681 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3682 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3683     SCTP_UNUSED
3684 #endif
3685 )
3686 {
3687 	struct sctp_association *asoc;
3688 	struct sctp_stream_out *outs;
3689 	struct sctp_tmit_chunk *chk;
3690 	struct sctp_stream_queue_pending *sp;
3691 	int i;
3692 
3693 	asoc = &stcb->asoc;
3694 
3695 	if (stcb == NULL) {
3696 		return;
3697 	}
3698 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3699 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3700 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3701 		return;
3702 	}
3703 	/* now through all the gunk freeing chunks */
3704 	if (holds_lock == 0) {
3705 		SCTP_TCB_SEND_LOCK(stcb);
3706 	}
3707 	/* sent queue SHOULD be empty */
3708 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3709 		chk = TAILQ_FIRST(&asoc->sent_queue);
3710 		while (chk) {
3711 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3712 			asoc->sent_queue_cnt--;
3713 			if (chk->data != NULL) {
3714 				sctp_free_bufspace(stcb, asoc, chk, 1);
3715 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3716 				    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3717 				if (chk->data) {
3718 					sctp_m_freem(chk->data);
3719 					chk->data = NULL;
3720 				}
3721 			}
3722 			sctp_free_a_chunk(stcb, chk);
3723 			/* sa_ignore FREED_MEMORY */
3724 			chk = TAILQ_FIRST(&asoc->sent_queue);
3725 		}
3726 	}
3727 	/* pending send queue SHOULD be empty */
3728 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3729 		chk = TAILQ_FIRST(&asoc->send_queue);
3730 		while (chk) {
3731 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3732 			asoc->send_queue_cnt--;
3733 			if (chk->data != NULL) {
3734 				sctp_free_bufspace(stcb, asoc, chk, 1);
3735 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3736 				    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3737 				if (chk->data) {
3738 					sctp_m_freem(chk->data);
3739 					chk->data = NULL;
3740 				}
3741 			}
3742 			sctp_free_a_chunk(stcb, chk);
3743 			/* sa_ignore FREED_MEMORY */
3744 			chk = TAILQ_FIRST(&asoc->send_queue);
3745 		}
3746 	}
3747 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3748 		/* For each stream */
3749 		outs = &stcb->asoc.strmout[i];
3750 		/* clean up any sends there */
3751 		stcb->asoc.locked_on_sending = NULL;
3752 		sp = TAILQ_FIRST(&outs->outqueue);
3753 		while (sp) {
3754 			stcb->asoc.stream_queue_cnt--;
3755 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3756 			sctp_free_spbufspace(stcb, asoc, sp);
3757 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3758 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3759 			if (sp->data) {
3760 				sctp_m_freem(sp->data);
3761 				sp->data = NULL;
3762 			}
3763 			if (sp->net)
3764 				sctp_free_remote_addr(sp->net);
3765 			sp->net = NULL;
3766 			/* Free the chunk */
3767 			sctp_free_a_strmoq(stcb, sp);
3768 			/* sa_ignore FREED_MEMORY */
3769 			sp = TAILQ_FIRST(&outs->outqueue);
3770 		}
3771 	}
3772 
3773 	if (holds_lock == 0) {
3774 		SCTP_TCB_SEND_UNLOCK(stcb);
3775 	}
3776 }
3777 
3778 void
3779 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3780 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3781     SCTP_UNUSED
3782 #endif
3783 )
3784 {
3785 
3786 	if (stcb == NULL) {
3787 		return;
3788 	}
3789 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3790 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3791 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3792 		return;
3793 	}
3794 	/* Tell them we lost the asoc */
3795 	sctp_report_all_outbound(stcb, 1, so_locked);
3796 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3797 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3798 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3799 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3800 	}
3801 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3802 }
3803 
/*
 * Abort an association in response to an inbound packet: notify the ULP
 * (when a TCB exists), send an ABORT back to the peer using the peer's
 * vtag, then free the TCB — or, with no TCB, possibly free the inp if
 * its socket is already gone and no associations remain.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Acquire the socket lock before freeing; drop/retake the
		 * TCB lock to keep lock order, pinning stcb via refcount.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last association gone: release the inp */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3849 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug dump of the per-association TSN in/out ring logs.
 *
 * NOTE(review): the body is guarded by NOSIY_PRINTS (sic) — apparently a
 * misspelling of "NOISY_PRINTS" — so it compiles to a no-op unless that
 * exact macro is defined.  Correcting the spelling would change behavior
 * (enable the output); confirm intent before "fixing" it.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* when wrapped, the oldest entries run from tsn_in_at to the end */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3913 
3914 void
3915 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3916     int error, struct mbuf *op_err,
3917     int so_locked
3918 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3919     SCTP_UNUSED
3920 #endif
3921 )
3922 {
3923 	uint32_t vtag;
3924 
3925 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3926 	struct socket *so;
3927 
3928 #endif
3929 
3930 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3931 	so = SCTP_INP_SO(inp);
3932 #endif
3933 	if (stcb == NULL) {
3934 		/* Got to have a TCB */
3935 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3936 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3937 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3938 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
3939 			}
3940 		}
3941 		return;
3942 	} else {
3943 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3944 	}
3945 	vtag = stcb->asoc.peer_vtag;
3946 	/* notify the ulp */
3947 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
3948 		sctp_abort_notification(stcb, error, so_locked);
3949 	/* notify the peer */
3950 #if defined(SCTP_PANIC_ON_ABORT)
3951 	panic("aborting an association");
3952 #endif
3953 	sctp_send_abort_tcb(stcb, op_err, so_locked);
3954 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3955 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3956 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3957 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3958 	}
3959 	/* now free the asoc */
3960 #ifdef SCTP_ASOCLOG_OF_TSNS
3961 	sctp_print_out_track_log(stcb);
3962 #endif
3963 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3964 	if (!so_locked) {
3965 		atomic_add_int(&stcb->asoc.refcnt, 1);
3966 		SCTP_TCB_UNLOCK(stcb);
3967 		SCTP_SOCKET_LOCK(so, 1);
3968 		SCTP_TCB_LOCK(stcb);
3969 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3970 	}
3971 #endif
3972 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
3973 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3974 	if (!so_locked) {
3975 		SCTP_SOCKET_UNLOCK(so, 1);
3976 	}
3977 #endif
3978 }
3979 
/*
 * Handle an "out of the blue" (OOTB) packet, i.e. one that matched no
 * existing association.  Walks the chunk list: chunk types that must
 * not be answered (COOKIE-ECHO, PACKET-DROPPED, ABORT,
 * SHUTDOWN-COMPLETE) cause a silent return; SHUTDOWN-ACK is answered
 * with a SHUTDOWN-COMPLETE; anything else falls through and the
 * packet is answered with an ABORT carrying op_err.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			/* socket gone and no assocs left: finish teardown */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
			return;
		default:
			break;
		}
		/* advance to the next (32-bit padded) chunk */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
}
4031 
4032 /*
4033  * check the inbound datagram to make sure there is not an abort inside it,
4034  * if there is return 1, else return 0.
4035  */
4036 int
4037 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4038 {
4039 	struct sctp_chunkhdr *ch;
4040 	struct sctp_init_chunk *init_chk, chunk_buf;
4041 	int offset;
4042 	unsigned int chk_length;
4043 
4044 	offset = iphlen + sizeof(struct sctphdr);
4045 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4046 	    (uint8_t *) & chunk_buf);
4047 	while (ch != NULL) {
4048 		chk_length = ntohs(ch->chunk_length);
4049 		if (chk_length < sizeof(*ch)) {
4050 			/* packet is probably corrupt */
4051 			break;
4052 		}
4053 		/* we seem to be ok, is it an abort? */
4054 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4055 			/* yep, tell them */
4056 			return (1);
4057 		}
4058 		if (ch->chunk_type == SCTP_INITIATION) {
4059 			/* need to update the Vtag */
4060 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4061 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4062 			if (init_chk != NULL) {
4063 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4064 			}
4065 		}
4066 		/* Nope, move to the next chunk */
4067 		offset += SCTP_SIZE32(chk_length);
4068 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4069 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4070 	}
4071 	return (0);
4072 }
4073 
4074 /*
4075  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4076  * set (i.e. it's 0) so, create this function to compare link local scopes
4077  */
4078 #ifdef INET6
4079 uint32_t
4080 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4081 {
4082 	struct sockaddr_in6 a, b;
4083 
4084 	/* save copies */
4085 	a = *addr1;
4086 	b = *addr2;
4087 
4088 	if (a.sin6_scope_id == 0)
4089 		if (sa6_recoverscope(&a)) {
4090 			/* can't get scope, so can't match */
4091 			return (0);
4092 		}
4093 	if (b.sin6_scope_id == 0)
4094 		if (sa6_recoverscope(&b)) {
4095 			/* can't get scope, so can't match */
4096 			return (0);
4097 		}
4098 	if (a.sin6_scope_id != b.sin6_scope_id)
4099 		return (0);
4100 
4101 	return (1);
4102 }
4103 
4104 /*
4105  * returns a sockaddr_in6 with embedded scope recovered and removed
4106  */
/*
 * Return a usable sockaddr_in6 for a link-local address: if the
 * address has sin6_scope_id == 0, a copy is placed in *store, the
 * scope recovered there, and the copy returned; otherwise the
 * embedded scope is cleared.  Non-link-local or non-INET6 addresses
 * are returned unchanged.
 * NOTE(review): the else branch calls in6_clearscope() on the
 * caller's sockaddr itself, modifying it in place -- confirm callers
 * expect that side effect.
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
4127 
4128 #endif
4129 
4130 /*
4131  * are the two addresses the same?  currently a "scopeless" check returns: 1
4132  * if same, 0 if not
4133  */
4134 int
4135 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4136 {
4137 
4138 	/* must be valid */
4139 	if (sa1 == NULL || sa2 == NULL)
4140 		return (0);
4141 
4142 	/* must be the same family */
4143 	if (sa1->sa_family != sa2->sa_family)
4144 		return (0);
4145 
4146 	switch (sa1->sa_family) {
4147 #ifdef INET6
4148 	case AF_INET6:
4149 		{
4150 			/* IPv6 addresses */
4151 			struct sockaddr_in6 *sin6_1, *sin6_2;
4152 
4153 			sin6_1 = (struct sockaddr_in6 *)sa1;
4154 			sin6_2 = (struct sockaddr_in6 *)sa2;
4155 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4156 			    sin6_2));
4157 		}
4158 #endif
4159 	case AF_INET:
4160 		{
4161 			/* IPv4 addresses */
4162 			struct sockaddr_in *sin_1, *sin_2;
4163 
4164 			sin_1 = (struct sockaddr_in *)sa1;
4165 			sin_2 = (struct sockaddr_in *)sa2;
4166 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4167 		}
4168 	default:
4169 		/* we don't do these... */
4170 		return (0);
4171 	}
4172 }
4173 
4174 void
4175 sctp_print_address(struct sockaddr *sa)
4176 {
4177 #ifdef INET6
4178 	char ip6buf[INET6_ADDRSTRLEN];
4179 
4180 	ip6buf[0] = 0;
4181 #endif
4182 
4183 	switch (sa->sa_family) {
4184 #ifdef INET6
4185 	case AF_INET6:
4186 		{
4187 			struct sockaddr_in6 *sin6;
4188 
4189 			sin6 = (struct sockaddr_in6 *)sa;
4190 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4191 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4192 			    ntohs(sin6->sin6_port),
4193 			    sin6->sin6_scope_id);
4194 			break;
4195 		}
4196 #endif
4197 	case AF_INET:
4198 		{
4199 			struct sockaddr_in *sin;
4200 			unsigned char *p;
4201 
4202 			sin = (struct sockaddr_in *)sa;
4203 			p = (unsigned char *)&sin->sin_addr;
4204 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4205 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4206 			break;
4207 		}
4208 	default:
4209 		SCTP_PRINTF("?\n");
4210 		break;
4211 	}
4212 }
4213 
4214 void
4215 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4216 {
4217 	switch (iph->ip_v) {
4218 		case IPVERSION:
4219 		{
4220 			struct sockaddr_in lsa, fsa;
4221 
4222 			bzero(&lsa, sizeof(lsa));
4223 			lsa.sin_len = sizeof(lsa);
4224 			lsa.sin_family = AF_INET;
4225 			lsa.sin_addr = iph->ip_src;
4226 			lsa.sin_port = sh->src_port;
4227 			bzero(&fsa, sizeof(fsa));
4228 			fsa.sin_len = sizeof(fsa);
4229 			fsa.sin_family = AF_INET;
4230 			fsa.sin_addr = iph->ip_dst;
4231 			fsa.sin_port = sh->dest_port;
4232 			SCTP_PRINTF("src: ");
4233 			sctp_print_address((struct sockaddr *)&lsa);
4234 			SCTP_PRINTF("dest: ");
4235 			sctp_print_address((struct sockaddr *)&fsa);
4236 			break;
4237 		}
4238 #ifdef INET6
4239 	case IPV6_VERSION >> 4:
4240 		{
4241 			struct ip6_hdr *ip6;
4242 			struct sockaddr_in6 lsa6, fsa6;
4243 
4244 			ip6 = (struct ip6_hdr *)iph;
4245 			bzero(&lsa6, sizeof(lsa6));
4246 			lsa6.sin6_len = sizeof(lsa6);
4247 			lsa6.sin6_family = AF_INET6;
4248 			lsa6.sin6_addr = ip6->ip6_src;
4249 			lsa6.sin6_port = sh->src_port;
4250 			bzero(&fsa6, sizeof(fsa6));
4251 			fsa6.sin6_len = sizeof(fsa6);
4252 			fsa6.sin6_family = AF_INET6;
4253 			fsa6.sin6_addr = ip6->ip6_dst;
4254 			fsa6.sin6_port = sh->dest_port;
4255 			SCTP_PRINTF("src: ");
4256 			sctp_print_address((struct sockaddr *)&lsa6);
4257 			SCTP_PRINTF("dest: ");
4258 			sctp_print_address((struct sockaddr *)&fsa6);
4259 			break;
4260 		}
4261 #endif
4262 	default:
4263 		/* TSNH */
4264 		break;
4265 	}
4266 }
4267 
/*
 * Move every read-queue entry belonging to stcb from old_inp's socket
 * to new_inp's socket (used when an association is peeled off /
 * accepted onto its own socket).  Entries are first collected on a
 * temporary queue under the old INP read lock, uncharging the old
 * socket's receive buffer, then appended to the new endpoint's read
 * queue while charging the new socket's receive buffer.  waitflags is
 * passed through to sblock(); if the sb-lock cannot be taken the data
 * is left where it is.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for out target stcb */
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* uncharge each mbuf from the old rcv buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge each mbuf to the new rcv buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4349 
/*
 * Place a message (control) on the end of the endpoint's read queue,
 * charging its mbuf chain to socket buffer sb so select()/read() see
 * the data.  Zero-length mbufs are stripped from the chain first; if
 * the chain collapses to nothing the control is NOT queued.  When end
 * is set the message is marked complete.  Finally the socket is woken
 * up (or the zero-copy event is fired instead).
 *
 * inp_read_lock_held: caller already holds the INP read lock.
 * so_locked: caller already holds the socket lock (only meaningful on
 * Apple / SCTP_SO_LOCK_TESTING builds).
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	/* notifications are not counted as receives */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* charge this mbuf to the socket buffer */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* wake the reader (or deliver the zero-copy event) */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/* hold a ref across the lock-order dance */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* socket went away while unlocked */
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4458 
4459 
/*
 * Append mbuf chain m to an existing read-queue entry (partial
 * delivery API, or a reassembly-queue append).  Zero-length mbufs are
 * stripped; when sb is non-NULL each remaining mbuf is charged to it.
 * The control's length and tail pointer are updated; when end is set
 * the message is marked complete and any pdapi reference is cleared.
 * ctls_cumack becomes the control's sinfo_tsn/sinfo_cumtsn.  Returns
 * 0 on success, -1 when there is no control, the control is already
 * complete, or there is nothing to add.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			/* charge this mbuf to the socket buffer */
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake the reader (or deliver the zero-copy event) */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/* hold a ref across the lock-order dance */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4601 
4602 
4603 
4604 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4605  *************ALTERNATE ROUTING CODE
4606  */
4607 
4608 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4609  *************ALTERNATE ROUTING CODE
4610  */
4611 
4612 struct mbuf *
4613 sctp_generate_invmanparam(int err)
4614 {
4615 	/* Return a MBUF with a invalid mandatory parameter */
4616 	struct mbuf *m;
4617 
4618 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4619 	if (m) {
4620 		struct sctp_paramhdr *ph;
4621 
4622 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4623 		ph = mtod(m, struct sctp_paramhdr *);
4624 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4625 		ph->param_type = htons(err);
4626 	}
4627 	return (m);
4628 }
4629 
4630 #ifdef SCTP_MBCNT_LOGGING
/*
 * Roll back output-queue space accounting for chunk tp1 as it is
 * being freed: drop chk_cnt from chunks_on_out_queue, subtract tp1's
 * book_size from the association's total output queue size and, for
 * TCP-model endpoints, from the socket send-buffer count.  Counts are
 * clamped at zero rather than allowed to underflow.  Only compiled
 * with SCTP_MBCNT_LOGGING.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero rather than underflowing the unsigned counter */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* only TCP-model sockets mirror the count in so_snd */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4662 
4663 #endif
4664 
4665 int
4666 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4667     int reason, int so_locked
4668 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4669     SCTP_UNUSED
4670 #endif
4671 )
4672 {
4673 	struct sctp_stream_out *strq;
4674 	struct sctp_tmit_chunk *chk = NULL;
4675 	struct sctp_stream_queue_pending *sp;
4676 	uint16_t stream = 0, seq = 0;
4677 	uint8_t foundeom = 0;
4678 	int ret_sz = 0;
4679 	int notdone;
4680 	int do_wakeup_routine = 0;
4681 
4682 	stream = tp1->rec.data.stream_number;
4683 	seq = tp1->rec.data.stream_seq;
4684 	do {
4685 		ret_sz += tp1->book_size;
4686 		if (tp1->data != NULL) {
4687 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4688 				sctp_flight_size_decrease(tp1);
4689 				sctp_total_flight_decrease(stcb, tp1);
4690 			}
4691 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4692 			stcb->asoc.peers_rwnd += tp1->send_size;
4693 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4694 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4695 			if (tp1->data) {
4696 				sctp_m_freem(tp1->data);
4697 				tp1->data = NULL;
4698 			}
4699 			do_wakeup_routine = 1;
4700 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4701 				stcb->asoc.sent_queue_cnt_removeable--;
4702 			}
4703 		}
4704 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4705 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4706 		    SCTP_DATA_NOT_FRAG) {
4707 			/* not frag'ed we ae done   */
4708 			notdone = 0;
4709 			foundeom = 1;
4710 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4711 			/* end of frag, we are done */
4712 			notdone = 0;
4713 			foundeom = 1;
4714 		} else {
4715 			/*
4716 			 * Its a begin or middle piece, we must mark all of
4717 			 * it
4718 			 */
4719 			notdone = 1;
4720 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4721 		}
4722 	} while (tp1 && notdone);
4723 	if (foundeom == 0) {
4724 		/*
4725 		 * The multi-part message was scattered across the send and
4726 		 * sent queue.
4727 		 */
4728 next_on_sent:
4729 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4730 		/*
4731 		 * recurse throught the send_queue too, starting at the
4732 		 * beginning.
4733 		 */
4734 		if ((tp1) &&
4735 		    (tp1->rec.data.stream_number == stream) &&
4736 		    (tp1->rec.data.stream_seq == seq)) {
4737 			/*
4738 			 * save to chk in case we have some on stream out
4739 			 * queue. If so and we have an un-transmitted one we
4740 			 * don't have to fudge the TSN.
4741 			 */
4742 			chk = tp1;
4743 			ret_sz += tp1->book_size;
4744 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4745 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4746 			if (tp1->data) {
4747 				sctp_m_freem(tp1->data);
4748 				tp1->data = NULL;
4749 			}
4750 			/* No flight involved here book the size to 0 */
4751 			tp1->book_size = 0;
4752 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4753 				foundeom = 1;
4754 			}
4755 			do_wakeup_routine = 1;
4756 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4757 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4758 			/*
4759 			 * on to the sent queue so we can wait for it to be
4760 			 * passed by.
4761 			 */
4762 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4763 			    sctp_next);
4764 			stcb->asoc.send_queue_cnt--;
4765 			stcb->asoc.sent_queue_cnt++;
4766 			goto next_on_sent;
4767 		}
4768 	}
4769 	if (foundeom == 0) {
4770 		/*
4771 		 * Still no eom found. That means there is stuff left on the
4772 		 * stream out queue.. yuck.
4773 		 */
4774 		strq = &stcb->asoc.strmout[stream];
4775 		SCTP_TCB_SEND_LOCK(stcb);
4776 		sp = TAILQ_FIRST(&strq->outqueue);
4777 		while (sp->strseq <= seq) {
4778 			/* Check if its our SEQ */
4779 			if (sp->strseq == seq) {
4780 				sp->discard_rest = 1;
4781 				/*
4782 				 * We may need to put a chunk on the queue
4783 				 * that holds the TSN that would have been
4784 				 * sent with the LAST bit.
4785 				 */
4786 				if (chk == NULL) {
4787 					/* Yep, we have to */
4788 					sctp_alloc_a_chunk(stcb, chk);
4789 					if (chk == NULL) {
4790 						/*
4791 						 * we are hosed. All we can
4792 						 * do is nothing.. which
4793 						 * will cause an abort if
4794 						 * the peer is paying
4795 						 * attention.
4796 						 */
4797 						goto oh_well;
4798 					}
4799 					memset(chk, 0, sizeof(*chk));
4800 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4801 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4802 					chk->asoc = &stcb->asoc;
4803 					chk->rec.data.stream_seq = sp->strseq;
4804 					chk->rec.data.stream_number = sp->stream;
4805 					chk->rec.data.payloadtype = sp->ppid;
4806 					chk->rec.data.context = sp->context;
4807 					chk->flags = sp->act_flags;
4808 					chk->whoTo = sp->net;
4809 					atomic_add_int(&chk->whoTo->ref_count, 1);
4810 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4811 					stcb->asoc.pr_sctp_cnt++;
4812 					chk->pr_sctp_on = 1;
4813 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4814 					stcb->asoc.sent_queue_cnt++;
4815 					stcb->asoc.pr_sctp_cnt++;
4816 				} else {
4817 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4818 				}
4819 		oh_well:
4820 				if (sp->data) {
4821 					/*
4822 					 * Pull any data to free up the SB
4823 					 * and allow sender to "add more"
4824 					 * whilc we will throw away :-)
4825 					 */
4826 					sctp_free_spbufspace(stcb, &stcb->asoc,
4827 					    sp);
4828 					ret_sz += sp->length;
4829 					do_wakeup_routine = 1;
4830 					sp->some_taken = 1;
4831 					sctp_m_freem(sp->data);
4832 					sp->length = 0;
4833 					sp->data = NULL;
4834 					sp->tail_mbuf = NULL;
4835 				}
4836 				break;
4837 			} else {
4838 				/* Next one please */
4839 				sp = TAILQ_NEXT(sp, next);
4840 			}
4841 		}		/* End while */
4842 		SCTP_TCB_SEND_UNLOCK(stcb);
4843 	}
4844 	if (do_wakeup_routine) {
4845 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4846 		struct socket *so;
4847 
4848 		so = SCTP_INP_SO(stcb->sctp_ep);
4849 		if (!so_locked) {
4850 			atomic_add_int(&stcb->asoc.refcnt, 1);
4851 			SCTP_TCB_UNLOCK(stcb);
4852 			SCTP_SOCKET_LOCK(so, 1);
4853 			SCTP_TCB_LOCK(stcb);
4854 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4855 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4856 				/* assoc was freed while we were unlocked */
4857 				SCTP_SOCKET_UNLOCK(so, 1);
4858 				return (ret_sz);
4859 			}
4860 		}
4861 #endif
4862 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4863 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4864 		if (!so_locked) {
4865 			SCTP_SOCKET_UNLOCK(so, 1);
4866 		}
4867 #endif
4868 	}
4869 	return (ret_sz);
4870 }
4871 
4872 /*
4873  * checks to see if the given address, sa, is one that is currently known by
4874  * the kernel note: can't distinguish the same address on multiple interfaces
4875  * and doesn't handle multiple addresses with different zone/scope id's note:
4876  * ifa_ifwithaddr() compares the entire sockaddr struct
4877  */
4878 struct sctp_ifa *
4879 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4880     int holds_lock)
4881 {
4882 	struct sctp_laddr *laddr;
4883 
4884 	if (holds_lock == 0) {
4885 		SCTP_INP_RLOCK(inp);
4886 	}
4887 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4888 		if (laddr->ifa == NULL)
4889 			continue;
4890 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4891 			continue;
4892 		if (addr->sa_family == AF_INET) {
4893 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4894 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4895 				/* found him. */
4896 				if (holds_lock == 0) {
4897 					SCTP_INP_RUNLOCK(inp);
4898 				}
4899 				return (laddr->ifa);
4900 				break;
4901 			}
4902 		}
4903 #ifdef INET6
4904 		if (addr->sa_family == AF_INET6) {
4905 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4906 			    &laddr->ifa->address.sin6)) {
4907 				/* found him. */
4908 				if (holds_lock == 0) {
4909 					SCTP_INP_RUNLOCK(inp);
4910 				}
4911 				return (laddr->ifa);
4912 				break;
4913 			}
4914 		}
4915 #endif
4916 	}
4917 	if (holds_lock == 0) {
4918 		SCTP_INP_RUNLOCK(inp);
4919 	}
4920 	return (NULL);
4921 }
4922 
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	/*
	 * Compute the hash bucket value for an address: fold the 32-bit
	 * address words together and mix the upper half into the lower
	 * half.  Unknown address families hash to 0.
	 */
	uint32_t h;

	switch (addr->sa_family) {
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			h = sin->sin_addr.s_addr;
			return (h ^ (h >> 16));
		}
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)addr;
			/* Sum all four 32-bit words of the v6 address. */
			h = sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3];
			return (h ^ (h >> 16));
		}
	default:
		return (0);
	}
}
4945 
4946 struct sctp_ifa *
4947 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4948 {
4949 	struct sctp_ifa *sctp_ifap;
4950 	struct sctp_vrf *vrf;
4951 	struct sctp_ifalist *hash_head;
4952 	uint32_t hash_of_addr;
4953 
4954 	if (holds_lock == 0)
4955 		SCTP_IPI_ADDR_RLOCK();
4956 
4957 	vrf = sctp_find_vrf(vrf_id);
4958 	if (vrf == NULL) {
4959 stage_right:
4960 		if (holds_lock == 0)
4961 			SCTP_IPI_ADDR_RUNLOCK();
4962 		return (NULL);
4963 	}
4964 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4965 
4966 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4967 	if (hash_head == NULL) {
4968 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4969 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4970 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4971 		sctp_print_address(addr);
4972 		SCTP_PRINTF("No such bucket for address\n");
4973 		if (holds_lock == 0)
4974 			SCTP_IPI_ADDR_RUNLOCK();
4975 
4976 		return (NULL);
4977 	}
4978 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4979 		if (sctp_ifap == NULL) {
4980 #ifdef INVARIANTS
4981 			panic("Huh LIST_FOREACH corrupt");
4982 			goto stage_right;
4983 #else
4984 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4985 			goto stage_right;
4986 #endif
4987 		}
4988 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4989 			continue;
4990 		if (addr->sa_family == AF_INET) {
4991 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4992 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4993 				/* found him. */
4994 				if (holds_lock == 0)
4995 					SCTP_IPI_ADDR_RUNLOCK();
4996 				return (sctp_ifap);
4997 				break;
4998 			}
4999 		}
5000 #ifdef INET6
5001 		if (addr->sa_family == AF_INET6) {
5002 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5003 			    &sctp_ifap->address.sin6)) {
5004 				/* found him. */
5005 				if (holds_lock == 0)
5006 					SCTP_IPI_ADDR_RUNLOCK();
5007 				return (sctp_ifap);
5008 				break;
5009 			}
5010 		}
5011 #endif
5012 	}
5013 	if (holds_lock == 0)
5014 		SCTP_IPI_ADDR_RUNLOCK();
5015 	return (NULL);
5016 }
5017 
/*
 * Called from the socket receive path after the user has consumed
 * *freed_so_far bytes.  Decides whether the peer should be informed of the
 * newly opened receive window: if the computed window grew by at least
 * rwnd_req bytes since the last report, a window-update SACK is sent
 * immediately; otherwise the freed byte count is accumulated in
 * stcb->freed_by_sorcv_sincelast for a later call.  *freed_so_far is reset
 * to zero once folded into the tcb.
 *
 * hold_rlock is non-zero when the caller holds the endpoint's read-queue
 * lock; that lock is dropped around the SACK/output work and re-acquired
 * before returning, so the caller's locking state is preserved.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed while we work. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Pin the endpoint as well; released at 'out'. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed count into the per-tcb running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Window opened enough to be worth reporting.  Drop the
		 * caller's read-queue lock before taking the TCB lock to
		 * keep the lock ordering; re-acquired at 'out'.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: the state may have changed. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Restore the read-queue lock if we dropped it for the caller. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5099 
5100 int
5101 sctp_sorecvmsg(struct socket *so,
5102     struct uio *uio,
5103     struct mbuf **mp,
5104     struct sockaddr *from,
5105     int fromlen,
5106     int *msg_flags,
5107     struct sctp_sndrcvinfo *sinfo,
5108     int filling_sinfo)
5109 {
5110 	/*
5111 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5112 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5113 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5114 	 * On the way out we may send out any combination of:
5115 	 * MSG_NOTIFICATION MSG_EOR
5116 	 *
5117 	 */
5118 	struct sctp_inpcb *inp = NULL;
5119 	int my_len = 0;
5120 	int cp_len = 0, error = 0;
5121 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5122 	struct mbuf *m = NULL, *embuf = NULL;
5123 	struct sctp_tcb *stcb = NULL;
5124 	int wakeup_read_socket = 0;
5125 	int freecnt_applied = 0;
5126 	int out_flags = 0, in_flags = 0;
5127 	int block_allowed = 1;
5128 	uint32_t freed_so_far = 0;
5129 	uint32_t copied_so_far = 0;
5130 	int in_eeor_mode = 0;
5131 	int no_rcv_needed = 0;
5132 	uint32_t rwnd_req = 0;
5133 	int hold_sblock = 0;
5134 	int hold_rlock = 0;
5135 	int slen = 0;
5136 	uint32_t held_length = 0;
5137 	int sockbuf_lock = 0;
5138 
5139 	if (uio == NULL) {
5140 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5141 		return (EINVAL);
5142 	}
5143 	if (msg_flags) {
5144 		in_flags = *msg_flags;
5145 		if (in_flags & MSG_PEEK)
5146 			SCTP_STAT_INCR(sctps_read_peeks);
5147 	} else {
5148 		in_flags = 0;
5149 	}
5150 	slen = uio->uio_resid;
5151 
5152 	/* Pull in and set up our int flags */
5153 	if (in_flags & MSG_OOB) {
5154 		/* Out of band's NOT supported */
5155 		return (EOPNOTSUPP);
5156 	}
5157 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5158 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5159 		return (EINVAL);
5160 	}
5161 	if ((in_flags & (MSG_DONTWAIT
5162 	    | MSG_NBIO
5163 	    )) ||
5164 	    SCTP_SO_IS_NBIO(so)) {
5165 		block_allowed = 0;
5166 	}
5167 	/* setup the endpoint */
5168 	inp = (struct sctp_inpcb *)so->so_pcb;
5169 	if (inp == NULL) {
5170 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5171 		return (EFAULT);
5172 	}
5173 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5174 	/* Must be at least a MTU's worth */
5175 	if (rwnd_req < SCTP_MIN_RWND)
5176 		rwnd_req = SCTP_MIN_RWND;
5177 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5178 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5179 		sctp_misc_ints(SCTP_SORECV_ENTER,
5180 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5181 	}
5182 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5183 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5184 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5185 	}
5186 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5187 	sockbuf_lock = 1;
5188 	if (error) {
5189 		goto release_unlocked;
5190 	}
5191 restart:
5192 
5193 
5194 restart_nosblocks:
5195 	if (hold_sblock == 0) {
5196 		SOCKBUF_LOCK(&so->so_rcv);
5197 		hold_sblock = 1;
5198 	}
5199 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5200 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5201 		goto out;
5202 	}
5203 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5204 		if (so->so_error) {
5205 			error = so->so_error;
5206 			if ((in_flags & MSG_PEEK) == 0)
5207 				so->so_error = 0;
5208 			goto out;
5209 		} else {
5210 			if (so->so_rcv.sb_cc == 0) {
5211 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5212 				/* indicate EOF */
5213 				error = 0;
5214 				goto out;
5215 			}
5216 		}
5217 	}
5218 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5219 		/* we need to wait for data */
5220 		if ((so->so_rcv.sb_cc == 0) &&
5221 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5222 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5223 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5224 				/*
5225 				 * For active open side clear flags for
5226 				 * re-use passive open is blocked by
5227 				 * connect.
5228 				 */
5229 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5230 					/*
5231 					 * You were aborted, passive side
5232 					 * always hits here
5233 					 */
5234 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5235 					error = ECONNRESET;
5236 					/*
5237 					 * You get this once if you are
5238 					 * active open side
5239 					 */
5240 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5241 						/*
5242 						 * Remove flag if on the
5243 						 * active open side
5244 						 */
5245 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5246 					}
5247 				}
5248 				so->so_state &= ~(SS_ISCONNECTING |
5249 				    SS_ISDISCONNECTING |
5250 				    SS_ISCONFIRMING |
5251 				    SS_ISCONNECTED);
5252 				if (error == 0) {
5253 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5254 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5255 						error = ENOTCONN;
5256 					} else {
5257 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5258 					}
5259 				}
5260 				goto out;
5261 			}
5262 		}
5263 		error = sbwait(&so->so_rcv);
5264 		if (error) {
5265 			goto out;
5266 		}
5267 		held_length = 0;
5268 		goto restart_nosblocks;
5269 	} else if (so->so_rcv.sb_cc == 0) {
5270 		if (so->so_error) {
5271 			error = so->so_error;
5272 			if ((in_flags & MSG_PEEK) == 0)
5273 				so->so_error = 0;
5274 		} else {
5275 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5276 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5277 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5278 					/*
5279 					 * For active open side clear flags
5280 					 * for re-use passive open is
5281 					 * blocked by connect.
5282 					 */
5283 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5284 						/*
5285 						 * You were aborted, passive
5286 						 * side always hits here
5287 						 */
5288 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5289 						error = ECONNRESET;
5290 						/*
5291 						 * You get this once if you
5292 						 * are active open side
5293 						 */
5294 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5295 							/*
5296 							 * Remove flag if on
5297 							 * the active open
5298 							 * side
5299 							 */
5300 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5301 						}
5302 					}
5303 					so->so_state &= ~(SS_ISCONNECTING |
5304 					    SS_ISDISCONNECTING |
5305 					    SS_ISCONFIRMING |
5306 					    SS_ISCONNECTED);
5307 					if (error == 0) {
5308 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5309 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5310 							error = ENOTCONN;
5311 						} else {
5312 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5313 						}
5314 					}
5315 					goto out;
5316 				}
5317 			}
5318 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5319 			error = EWOULDBLOCK;
5320 		}
5321 		goto out;
5322 	}
5323 	if (hold_sblock == 1) {
5324 		SOCKBUF_UNLOCK(&so->so_rcv);
5325 		hold_sblock = 0;
5326 	}
5327 	/* we possibly have data we can read */
5328 	/* sa_ignore FREED_MEMORY */
5329 	control = TAILQ_FIRST(&inp->read_queue);
5330 	if (control == NULL) {
5331 		/*
5332 		 * This could be happening since the appender did the
5333 		 * increment but as not yet did the tailq insert onto the
5334 		 * read_queue
5335 		 */
5336 		if (hold_rlock == 0) {
5337 			SCTP_INP_READ_LOCK(inp);
5338 			hold_rlock = 1;
5339 		}
5340 		control = TAILQ_FIRST(&inp->read_queue);
5341 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5342 #ifdef INVARIANTS
5343 			panic("Huh, its non zero and nothing on control?");
5344 #endif
5345 			so->so_rcv.sb_cc = 0;
5346 		}
5347 		SCTP_INP_READ_UNLOCK(inp);
5348 		hold_rlock = 0;
5349 		goto restart;
5350 	}
5351 	if ((control->length == 0) &&
5352 	    (control->do_not_ref_stcb)) {
5353 		/*
5354 		 * Clean up code for freeing assoc that left behind a
5355 		 * pdapi.. maybe a peer in EEOR that just closed after
5356 		 * sending and never indicated a EOR.
5357 		 */
5358 		if (hold_rlock == 0) {
5359 			hold_rlock = 1;
5360 			SCTP_INP_READ_LOCK(inp);
5361 		}
5362 		control->held_length = 0;
5363 		if (control->data) {
5364 			/* Hmm there is data here .. fix */
5365 			struct mbuf *m_tmp;
5366 			int cnt = 0;
5367 
5368 			m_tmp = control->data;
5369 			while (m_tmp) {
5370 				cnt += SCTP_BUF_LEN(m_tmp);
5371 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5372 					control->tail_mbuf = m_tmp;
5373 					control->end_added = 1;
5374 				}
5375 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5376 			}
5377 			control->length = cnt;
5378 		} else {
5379 			/* remove it */
5380 			TAILQ_REMOVE(&inp->read_queue, control, next);
5381 			/* Add back any hiddend data */
5382 			sctp_free_remote_addr(control->whoFrom);
5383 			sctp_free_a_readq(stcb, control);
5384 		}
5385 		if (hold_rlock) {
5386 			hold_rlock = 0;
5387 			SCTP_INP_READ_UNLOCK(inp);
5388 		}
5389 		goto restart;
5390 	}
5391 	if ((control->length == 0) &&
5392 	    (control->end_added == 1)) {
5393 		/*
5394 		 * Do we also need to check for (control->pdapi_aborted ==
5395 		 * 1)?
5396 		 */
5397 		if (hold_rlock == 0) {
5398 			hold_rlock = 1;
5399 			SCTP_INP_READ_LOCK(inp);
5400 		}
5401 		TAILQ_REMOVE(&inp->read_queue, control, next);
5402 		if (control->data) {
5403 #ifdef INVARIANTS
5404 			panic("control->data not null but control->length == 0");
5405 #else
5406 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5407 			sctp_m_freem(control->data);
5408 			control->data = NULL;
5409 #endif
5410 		}
5411 		if (control->aux_data) {
5412 			sctp_m_free(control->aux_data);
5413 			control->aux_data = NULL;
5414 		}
5415 		sctp_free_remote_addr(control->whoFrom);
5416 		sctp_free_a_readq(stcb, control);
5417 		if (hold_rlock) {
5418 			hold_rlock = 0;
5419 			SCTP_INP_READ_UNLOCK(inp);
5420 		}
5421 		goto restart;
5422 	}
5423 	if (control->length == 0) {
5424 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5425 		    (filling_sinfo)) {
5426 			/* find a more suitable one then this */
5427 			ctl = TAILQ_NEXT(control, next);
5428 			while (ctl) {
5429 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5430 				    (ctl->some_taken ||
5431 				    (ctl->spec_flags & M_NOTIFICATION) ||
5432 				    ((ctl->do_not_ref_stcb == 0) &&
5433 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5434 				    ) {
5435 					/*-
5436 					 * If we have a different TCB next, and there is data
5437 					 * present. If we have already taken some (pdapi), OR we can
5438 					 * ref the tcb and no delivery as started on this stream, we
5439 					 * take it. Note we allow a notification on a different
5440 					 * assoc to be delivered..
5441 					 */
5442 					control = ctl;
5443 					goto found_one;
5444 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5445 					    (ctl->length) &&
5446 					    ((ctl->some_taken) ||
5447 					    ((ctl->do_not_ref_stcb == 0) &&
5448 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5449 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5450 					/*-
5451 					 * If we have the same tcb, and there is data present, and we
5452 					 * have the strm interleave feature present. Then if we have
5453 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5454 					 * not started a delivery for this stream, we can take it.
5455 					 * Note we do NOT allow a notificaiton on the same assoc to
5456 					 * be delivered.
5457 					 */
5458 					control = ctl;
5459 					goto found_one;
5460 				}
5461 				ctl = TAILQ_NEXT(ctl, next);
5462 			}
5463 		}
5464 		/*
5465 		 * if we reach here, not suitable replacement is available
5466 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5467 		 * into the our held count, and its time to sleep again.
5468 		 */
5469 		held_length = so->so_rcv.sb_cc;
5470 		control->held_length = so->so_rcv.sb_cc;
5471 		goto restart;
5472 	}
5473 	/* Clear the held length since there is something to read */
5474 	control->held_length = 0;
5475 	if (hold_rlock) {
5476 		SCTP_INP_READ_UNLOCK(inp);
5477 		hold_rlock = 0;
5478 	}
5479 found_one:
5480 	/*
5481 	 * If we reach here, control has a some data for us to read off.
5482 	 * Note that stcb COULD be NULL.
5483 	 */
5484 	control->some_taken++;
5485 	if (hold_sblock) {
5486 		SOCKBUF_UNLOCK(&so->so_rcv);
5487 		hold_sblock = 0;
5488 	}
5489 	stcb = control->stcb;
5490 	if (stcb) {
5491 		if ((control->do_not_ref_stcb == 0) &&
5492 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5493 			if (freecnt_applied == 0)
5494 				stcb = NULL;
5495 		} else if (control->do_not_ref_stcb == 0) {
5496 			/* you can't free it on me please */
5497 			/*
5498 			 * The lock on the socket buffer protects us so the
5499 			 * free code will stop. But since we used the
5500 			 * socketbuf lock and the sender uses the tcb_lock
5501 			 * to increment, we need to use the atomic add to
5502 			 * the refcnt
5503 			 */
5504 			if (freecnt_applied) {
5505 #ifdef INVARIANTS
5506 				panic("refcnt already incremented");
5507 #else
5508 				printf("refcnt already incremented?\n");
5509 #endif
5510 			} else {
5511 				atomic_add_int(&stcb->asoc.refcnt, 1);
5512 				freecnt_applied = 1;
5513 			}
5514 			/*
5515 			 * Setup to remember how much we have not yet told
5516 			 * the peer our rwnd has opened up. Note we grab the
5517 			 * value from the tcb from last time. Note too that
5518 			 * sack sending clears this when a sack is sent,
5519 			 * which is fine. Once we hit the rwnd_req, we then
5520 			 * will go to the sctp_user_rcvd() that will not
5521 			 * lock until it KNOWs it MUST send a WUP-SACK.
5522 			 */
5523 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5524 			stcb->freed_by_sorcv_sincelast = 0;
5525 		}
5526 	}
5527 	if (stcb &&
5528 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5529 	    control->do_not_ref_stcb == 0) {
5530 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5531 	}
5532 	/* First lets get off the sinfo and sockaddr info */
5533 	if ((sinfo) && filling_sinfo) {
5534 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5535 		nxt = TAILQ_NEXT(control, next);
5536 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5537 			struct sctp_extrcvinfo *s_extra;
5538 
5539 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5540 			if ((nxt) &&
5541 			    (nxt->length)) {
5542 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5543 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5544 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5545 				}
5546 				if (nxt->spec_flags & M_NOTIFICATION) {
5547 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5548 				}
5549 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5550 				s_extra->sreinfo_next_length = nxt->length;
5551 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5552 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5553 				if (nxt->tail_mbuf != NULL) {
5554 					if (nxt->end_added) {
5555 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5556 					}
5557 				}
5558 			} else {
5559 				/*
5560 				 * we explicitly 0 this, since the memcpy
5561 				 * got some other things beyond the older
5562 				 * sinfo_ that is on the control's structure
5563 				 * :-D
5564 				 */
5565 				nxt = NULL;
5566 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5567 				s_extra->sreinfo_next_aid = 0;
5568 				s_extra->sreinfo_next_length = 0;
5569 				s_extra->sreinfo_next_ppid = 0;
5570 				s_extra->sreinfo_next_stream = 0;
5571 			}
5572 		}
5573 		/*
5574 		 * update off the real current cum-ack, if we have an stcb.
5575 		 */
5576 		if ((control->do_not_ref_stcb == 0) && stcb)
5577 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5578 		/*
5579 		 * mask off the high bits, we keep the actual chunk bits in
5580 		 * there.
5581 		 */
5582 		sinfo->sinfo_flags &= 0x00ff;
5583 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5584 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5585 		}
5586 	}
5587 #ifdef SCTP_ASOCLOG_OF_TSNS
5588 	{
5589 		int index, newindex;
5590 		struct sctp_pcbtsn_rlog *entry;
5591 
5592 		do {
5593 			index = inp->readlog_index;
5594 			newindex = index + 1;
5595 			if (newindex >= SCTP_READ_LOG_SIZE) {
5596 				newindex = 0;
5597 			}
5598 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5599 		entry = &inp->readlog[index];
5600 		entry->vtag = control->sinfo_assoc_id;
5601 		entry->strm = control->sinfo_stream;
5602 		entry->seq = control->sinfo_ssn;
5603 		entry->sz = control->length;
5604 		entry->flgs = control->sinfo_flags;
5605 	}
5606 #endif
5607 	if (fromlen && from) {
5608 		struct sockaddr *to;
5609 
5610 #ifdef INET
5611 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5612 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5613 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5614 #else
5615 		/* No AF_INET use AF_INET6 */
5616 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5617 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5618 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5619 #endif
5620 
5621 		to = from;
5622 #if defined(INET) && defined(INET6)
5623 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5624 		    (to->sa_family == AF_INET) &&
5625 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5626 			struct sockaddr_in *sin;
5627 			struct sockaddr_in6 sin6;
5628 
5629 			sin = (struct sockaddr_in *)to;
5630 			bzero(&sin6, sizeof(sin6));
5631 			sin6.sin6_family = AF_INET6;
5632 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5633 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5634 			bcopy(&sin->sin_addr,
5635 			    &sin6.sin6_addr.s6_addr32[3],
5636 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5637 			sin6.sin6_port = sin->sin_port;
5638 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5639 		}
5640 #endif
5641 #if defined(INET6)
5642 		{
5643 			struct sockaddr_in6 lsa6, *to6;
5644 
5645 			to6 = (struct sockaddr_in6 *)to;
5646 			sctp_recover_scope_mac(to6, (&lsa6));
5647 		}
5648 #endif
5649 	}
5650 	/* now copy out what data we can */
5651 	if (mp == NULL) {
5652 		/* copy out each mbuf in the chain up to length */
5653 get_more_data:
5654 		m = control->data;
5655 		while (m) {
5656 			/* Move out all we can */
5657 			cp_len = (int)uio->uio_resid;
5658 			my_len = (int)SCTP_BUF_LEN(m);
5659 			if (cp_len > my_len) {
5660 				/* not enough in this buf */
5661 				cp_len = my_len;
5662 			}
5663 			if (hold_rlock) {
5664 				SCTP_INP_READ_UNLOCK(inp);
5665 				hold_rlock = 0;
5666 			}
5667 			if (cp_len > 0)
5668 				error = uiomove(mtod(m, char *), cp_len, uio);
5669 			/* re-read */
5670 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5671 				goto release;
5672 			}
5673 			if ((control->do_not_ref_stcb == 0) && stcb &&
5674 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5675 				no_rcv_needed = 1;
5676 			}
5677 			if (error) {
5678 				/* error we are out of here */
5679 				goto release;
5680 			}
5681 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5682 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5683 			    ((control->end_added == 0) ||
5684 			    (control->end_added &&
5685 			    (TAILQ_NEXT(control, next) == NULL)))
5686 			    ) {
5687 				SCTP_INP_READ_LOCK(inp);
5688 				hold_rlock = 1;
5689 			}
5690 			if (cp_len == SCTP_BUF_LEN(m)) {
5691 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5692 				    (control->end_added)) {
5693 					out_flags |= MSG_EOR;
5694 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5695 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5696 				}
5697 				if (control->spec_flags & M_NOTIFICATION) {
5698 					out_flags |= MSG_NOTIFICATION;
5699 				}
5700 				/* we ate up the mbuf */
5701 				if (in_flags & MSG_PEEK) {
5702 					/* just looking */
5703 					m = SCTP_BUF_NEXT(m);
5704 					copied_so_far += cp_len;
5705 				} else {
5706 					/* dispose of the mbuf */
5707 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5708 						sctp_sblog(&so->so_rcv,
5709 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5710 					}
5711 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5712 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5713 						sctp_sblog(&so->so_rcv,
5714 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5715 					}
5716 					embuf = m;
5717 					copied_so_far += cp_len;
5718 					freed_so_far += cp_len;
5719 					freed_so_far += MSIZE;
5720 					atomic_subtract_int(&control->length, cp_len);
5721 					control->data = sctp_m_free(m);
5722 					m = control->data;
5723 					/*
5724 					 * been through it all, must hold sb
5725 					 * lock ok to null tail
5726 					 */
5727 					if (control->data == NULL) {
5728 #ifdef INVARIANTS
5729 						if ((control->end_added == 0) ||
5730 						    (TAILQ_NEXT(control, next) == NULL)) {
5731 							/*
5732 							 * If the end is not
5733 							 * added, OR the
5734 							 * next is NOT null
5735 							 * we MUST have the
5736 							 * lock.
5737 							 */
5738 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5739 								panic("Hmm we don't own the lock?");
5740 							}
5741 						}
5742 #endif
5743 						control->tail_mbuf = NULL;
5744 #ifdef INVARIANTS
5745 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5746 							panic("end_added, nothing left and no MSG_EOR");
5747 						}
5748 #endif
5749 					}
5750 				}
5751 			} else {
5752 				/* Do we need to trim the mbuf? */
5753 				if (control->spec_flags & M_NOTIFICATION) {
5754 					out_flags |= MSG_NOTIFICATION;
5755 				}
5756 				if ((in_flags & MSG_PEEK) == 0) {
5757 					SCTP_BUF_RESV_UF(m, cp_len);
5758 					SCTP_BUF_LEN(m) -= cp_len;
5759 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5760 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5761 					}
5762 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5763 					if ((control->do_not_ref_stcb == 0) &&
5764 					    stcb) {
5765 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5766 					}
5767 					copied_so_far += cp_len;
5768 					embuf = m;
5769 					freed_so_far += cp_len;
5770 					freed_so_far += MSIZE;
5771 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5772 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5773 						    SCTP_LOG_SBRESULT, 0);
5774 					}
5775 					atomic_subtract_int(&control->length, cp_len);
5776 				} else {
5777 					copied_so_far += cp_len;
5778 				}
5779 			}
5780 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5781 				break;
5782 			}
5783 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5784 			    (control->do_not_ref_stcb == 0) &&
5785 			    (freed_so_far >= rwnd_req)) {
5786 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5787 			}
5788 		}		/* end while(m) */
5789 		/*
5790 		 * At this point we have looked at it all and we either have
5791 		 * a MSG_EOR/or read all the user wants... <OR>
5792 		 * control->length == 0.
5793 		 */
5794 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5795 			/* we are done with this control */
5796 			if (control->length == 0) {
5797 				if (control->data) {
5798 #ifdef INVARIANTS
5799 					panic("control->data not null at read eor?");
5800 #else
5801 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5802 					sctp_m_freem(control->data);
5803 					control->data = NULL;
5804 #endif
5805 				}
5806 		done_with_control:
5807 				if (TAILQ_NEXT(control, next) == NULL) {
5808 					/*
5809 					 * If we don't have a next we need a
5810 					 * lock, if there is a next
5811 					 * interrupt is filling ahead of us
5812 					 * and we don't need a lock to
5813 					 * remove this guy (which is the
5814 					 * head of the queue).
5815 					 */
5816 					if (hold_rlock == 0) {
5817 						SCTP_INP_READ_LOCK(inp);
5818 						hold_rlock = 1;
5819 					}
5820 				}
5821 				TAILQ_REMOVE(&inp->read_queue, control, next);
5822 				/* Add back any hiddend data */
5823 				if (control->held_length) {
5824 					held_length = 0;
5825 					control->held_length = 0;
5826 					wakeup_read_socket = 1;
5827 				}
5828 				if (control->aux_data) {
5829 					sctp_m_free(control->aux_data);
5830 					control->aux_data = NULL;
5831 				}
5832 				no_rcv_needed = control->do_not_ref_stcb;
5833 				sctp_free_remote_addr(control->whoFrom);
5834 				control->data = NULL;
5835 				sctp_free_a_readq(stcb, control);
5836 				control = NULL;
5837 				if ((freed_so_far >= rwnd_req) &&
5838 				    (no_rcv_needed == 0))
5839 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5840 
5841 			} else {
5842 				/*
5843 				 * The user did not read all of this
5844 				 * message, turn off the returned MSG_EOR
5845 				 * since we are leaving more behind on the
5846 				 * control to read.
5847 				 */
5848 #ifdef INVARIANTS
5849 				if (control->end_added &&
5850 				    (control->data == NULL) &&
5851 				    (control->tail_mbuf == NULL)) {
5852 					panic("Gak, control->length is corrupt?");
5853 				}
5854 #endif
5855 				no_rcv_needed = control->do_not_ref_stcb;
5856 				out_flags &= ~MSG_EOR;
5857 			}
5858 		}
5859 		if (out_flags & MSG_EOR) {
5860 			goto release;
5861 		}
5862 		if ((uio->uio_resid == 0) ||
5863 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5864 		    ) {
5865 			goto release;
5866 		}
5867 		/*
5868 		 * If I hit here the receiver wants more and this message is
5869 		 * NOT done (pd-api). So two questions. Can we block? if not
5870 		 * we are done. Did the user NOT set MSG_WAITALL?
5871 		 */
5872 		if (block_allowed == 0) {
5873 			goto release;
5874 		}
5875 		/*
5876 		 * We need to wait for more data a few things: - We don't
5877 		 * sbunlock() so we don't get someone else reading. - We
5878 		 * must be sure to account for the case where what is added
5879 		 * is NOT to our control when we wakeup.
5880 		 */
5881 
5882 		/*
5883 		 * Do we need to tell the transport a rwnd update might be
5884 		 * needed before we go to sleep?
5885 		 */
5886 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5887 		    ((freed_so_far >= rwnd_req) &&
5888 		    (control->do_not_ref_stcb == 0) &&
5889 		    (no_rcv_needed == 0))) {
5890 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5891 		}
5892 wait_some_more:
5893 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5894 			goto release;
5895 		}
5896 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5897 			goto release;
5898 
5899 		if (hold_rlock == 1) {
5900 			SCTP_INP_READ_UNLOCK(inp);
5901 			hold_rlock = 0;
5902 		}
5903 		if (hold_sblock == 0) {
5904 			SOCKBUF_LOCK(&so->so_rcv);
5905 			hold_sblock = 1;
5906 		}
5907 		if ((copied_so_far) && (control->length == 0) &&
5908 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5909 			goto release;
5910 		}
5911 		if (so->so_rcv.sb_cc <= control->held_length) {
5912 			error = sbwait(&so->so_rcv);
5913 			if (error) {
5914 				goto release;
5915 			}
5916 			control->held_length = 0;
5917 		}
5918 		if (hold_sblock) {
5919 			SOCKBUF_UNLOCK(&so->so_rcv);
5920 			hold_sblock = 0;
5921 		}
5922 		if (control->length == 0) {
5923 			/* still nothing here */
5924 			if (control->end_added == 1) {
5925 				/* he aborted, or is done i.e.did a shutdown */
5926 				out_flags |= MSG_EOR;
5927 				if (control->pdapi_aborted) {
5928 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5929 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5930 
5931 					out_flags |= MSG_TRUNC;
5932 				} else {
5933 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5934 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5935 				}
5936 				goto done_with_control;
5937 			}
5938 			if (so->so_rcv.sb_cc > held_length) {
5939 				control->held_length = so->so_rcv.sb_cc;
5940 				held_length = 0;
5941 			}
5942 			goto wait_some_more;
5943 		} else if (control->data == NULL) {
5944 			/*
5945 			 * we must re-sync since data is probably being
5946 			 * added
5947 			 */
5948 			SCTP_INP_READ_LOCK(inp);
5949 			if ((control->length > 0) && (control->data == NULL)) {
5950 				/*
5951 				 * big trouble.. we have the lock and its
5952 				 * corrupt?
5953 				 */
5954 #ifdef INVARIANTS
5955 				panic("Impossible data==NULL length !=0");
5956 #endif
5957 				out_flags |= MSG_EOR;
5958 				out_flags |= MSG_TRUNC;
5959 				control->length = 0;
5960 				SCTP_INP_READ_UNLOCK(inp);
5961 				goto done_with_control;
5962 			}
5963 			SCTP_INP_READ_UNLOCK(inp);
5964 			/* We will fall around to get more data */
5965 		}
5966 		goto get_more_data;
5967 	} else {
5968 		/*-
5969 		 * Give caller back the mbuf chain,
5970 		 * store in uio_resid the length
5971 		 */
5972 		wakeup_read_socket = 0;
5973 		if ((control->end_added == 0) ||
5974 		    (TAILQ_NEXT(control, next) == NULL)) {
5975 			/* Need to get rlock */
5976 			if (hold_rlock == 0) {
5977 				SCTP_INP_READ_LOCK(inp);
5978 				hold_rlock = 1;
5979 			}
5980 		}
5981 		if (control->end_added) {
5982 			out_flags |= MSG_EOR;
5983 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5984 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5985 		}
5986 		if (control->spec_flags & M_NOTIFICATION) {
5987 			out_flags |= MSG_NOTIFICATION;
5988 		}
5989 		uio->uio_resid = control->length;
5990 		*mp = control->data;
5991 		m = control->data;
5992 		while (m) {
5993 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5994 				sctp_sblog(&so->so_rcv,
5995 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5996 			}
5997 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5998 			freed_so_far += SCTP_BUF_LEN(m);
5999 			freed_so_far += MSIZE;
6000 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6001 				sctp_sblog(&so->so_rcv,
6002 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6003 			}
6004 			m = SCTP_BUF_NEXT(m);
6005 		}
6006 		control->data = control->tail_mbuf = NULL;
6007 		control->length = 0;
6008 		if (out_flags & MSG_EOR) {
6009 			/* Done with this control */
6010 			goto done_with_control;
6011 		}
6012 	}
6013 release:
6014 	if (hold_rlock == 1) {
6015 		SCTP_INP_READ_UNLOCK(inp);
6016 		hold_rlock = 0;
6017 	}
6018 	if (hold_sblock == 1) {
6019 		SOCKBUF_UNLOCK(&so->so_rcv);
6020 		hold_sblock = 0;
6021 	}
6022 	sbunlock(&so->so_rcv);
6023 	sockbuf_lock = 0;
6024 
6025 release_unlocked:
6026 	if (hold_sblock) {
6027 		SOCKBUF_UNLOCK(&so->so_rcv);
6028 		hold_sblock = 0;
6029 	}
6030 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6031 		if ((freed_so_far >= rwnd_req) &&
6032 		    (control && (control->do_not_ref_stcb == 0)) &&
6033 		    (no_rcv_needed == 0))
6034 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6035 	}
6036 out:
6037 	if (msg_flags) {
6038 		*msg_flags = out_flags;
6039 	}
6040 	if (((out_flags & MSG_EOR) == 0) &&
6041 	    ((in_flags & MSG_PEEK) == 0) &&
6042 	    (sinfo) &&
6043 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
6044 		struct sctp_extrcvinfo *s_extra;
6045 
6046 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6047 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6048 	}
6049 	if (hold_rlock == 1) {
6050 		SCTP_INP_READ_UNLOCK(inp);
6051 		hold_rlock = 0;
6052 	}
6053 	if (hold_sblock) {
6054 		SOCKBUF_UNLOCK(&so->so_rcv);
6055 		hold_sblock = 0;
6056 	}
6057 	if (sockbuf_lock) {
6058 		sbunlock(&so->so_rcv);
6059 	}
6060 	if (freecnt_applied) {
6061 		/*
6062 		 * The lock on the socket buffer protects us so the free
6063 		 * code will stop. But since we used the socketbuf lock and
6064 		 * the sender uses the tcb_lock to increment, we need to use
6065 		 * the atomic add to the refcnt.
6066 		 */
6067 		if (stcb == NULL) {
6068 #ifdef INVARIANTS
6069 			panic("stcb for refcnt has gone NULL?");
6070 			goto stage_left;
6071 #else
6072 			goto stage_left;
6073 #endif
6074 		}
6075 		atomic_add_int(&stcb->asoc.refcnt, -1);
6076 		freecnt_applied = 0;
6077 		/* Save the value back for next time */
6078 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6079 	}
6080 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6081 		if (stcb) {
6082 			sctp_misc_ints(SCTP_SORECV_DONE,
6083 			    freed_so_far,
6084 			    ((uio) ? (slen - uio->uio_resid) : slen),
6085 			    stcb->asoc.my_rwnd,
6086 			    so->so_rcv.sb_cc);
6087 		} else {
6088 			sctp_misc_ints(SCTP_SORECV_DONE,
6089 			    freed_so_far,
6090 			    ((uio) ? (slen - uio->uio_resid) : slen),
6091 			    0,
6092 			    so->so_rcv.sb_cc);
6093 		}
6094 	}
6095 stage_left:
6096 	if (wakeup_read_socket) {
6097 		sctp_sorwakeup(inp, so);
6098 	}
6099 	return (error);
6100 }
6101 
6102 
6103 #ifdef SCTP_MBUF_LOGGING
6104 struct mbuf *
6105 sctp_m_free(struct mbuf *m)
6106 {
6107 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6108 		if (SCTP_BUF_IS_EXTENDED(m)) {
6109 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6110 		}
6111 	}
6112 	return (m_free(m));
6113 }
6114 
6115 void
6116 sctp_m_freem(struct mbuf *mb)
6117 {
6118 	while (mb != NULL)
6119 		mb = sctp_m_free(mb);
6120 }
6121 
6122 #endif
6123 
6124 int
6125 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6126 {
6127 	/*
6128 	 * Given a local address. For all associations that holds the
6129 	 * address, request a peer-set-primary.
6130 	 */
6131 	struct sctp_ifa *ifa;
6132 	struct sctp_laddr *wi;
6133 
6134 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6135 	if (ifa == NULL) {
6136 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6137 		return (EADDRNOTAVAIL);
6138 	}
6139 	/*
6140 	 * Now that we have the ifa we must awaken the iterator with this
6141 	 * message.
6142 	 */
6143 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6144 	if (wi == NULL) {
6145 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6146 		return (ENOMEM);
6147 	}
6148 	/* Now incr the count and int wi structure */
6149 	SCTP_INCR_LADDR_COUNT();
6150 	bzero(wi, sizeof(*wi));
6151 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6152 	wi->ifa = ifa;
6153 	wi->action = SCTP_SET_PRIM_ADDR;
6154 	atomic_add_int(&ifa->refcount, 1);
6155 
6156 	/* Now add it to the work queue */
6157 	SCTP_IPI_ITERATOR_WQ_LOCK();
6158 	/*
6159 	 * Should this really be a tailq? As it is we will process the
6160 	 * newest first :-0
6161 	 */
6162 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6163 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
6164 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6165 	    (struct sctp_inpcb *)NULL,
6166 	    (struct sctp_tcb *)NULL,
6167 	    (struct sctp_nets *)NULL);
6168 	return (0);
6169 }
6170 
6171 
6172 int
6173 sctp_soreceive(struct socket *so,
6174     struct sockaddr **psa,
6175     struct uio *uio,
6176     struct mbuf **mp0,
6177     struct mbuf **controlp,
6178     int *flagsp)
6179 {
6180 	int error, fromlen;
6181 	uint8_t sockbuf[256];
6182 	struct sockaddr *from;
6183 	struct sctp_extrcvinfo sinfo;
6184 	int filling_sinfo = 1;
6185 	struct sctp_inpcb *inp;
6186 
6187 	inp = (struct sctp_inpcb *)so->so_pcb;
6188 	/* pickup the assoc we are reading from */
6189 	if (inp == NULL) {
6190 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6191 		return (EINVAL);
6192 	}
6193 	if ((sctp_is_feature_off(inp,
6194 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6195 	    (controlp == NULL)) {
6196 		/* user does not want the sndrcv ctl */
6197 		filling_sinfo = 0;
6198 	}
6199 	if (psa) {
6200 		from = (struct sockaddr *)sockbuf;
6201 		fromlen = sizeof(sockbuf);
6202 		from->sa_len = 0;
6203 	} else {
6204 		from = NULL;
6205 		fromlen = 0;
6206 	}
6207 
6208 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6209 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6210 	if ((controlp) && (filling_sinfo)) {
6211 		/* copy back the sinfo in a CMSG format */
6212 		if (filling_sinfo)
6213 			*controlp = sctp_build_ctl_nchunk(inp,
6214 			    (struct sctp_sndrcvinfo *)&sinfo);
6215 		else
6216 			*controlp = NULL;
6217 	}
6218 	if (psa) {
6219 		/* copy back the address info */
6220 		if (from && from->sa_len) {
6221 			*psa = sodupsockaddr(from, M_NOWAIT);
6222 		} else {
6223 			*psa = NULL;
6224 		}
6225 	}
6226 	return (error);
6227 }
6228 
6229 
6230 int
6231 sctp_l_soreceive(struct socket *so,
6232     struct sockaddr **name,
6233     struct uio *uio,
6234     char **controlp,
6235     int *controllen,
6236     int *flag)
6237 {
6238 	int error, fromlen;
6239 	uint8_t sockbuf[256];
6240 	struct sockaddr *from;
6241 	struct sctp_extrcvinfo sinfo;
6242 	int filling_sinfo = 1;
6243 	struct sctp_inpcb *inp;
6244 
6245 	inp = (struct sctp_inpcb *)so->so_pcb;
6246 	/* pickup the assoc we are reading from */
6247 	if (inp == NULL) {
6248 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6249 		return (EINVAL);
6250 	}
6251 	if ((sctp_is_feature_off(inp,
6252 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6253 	    (controlp == NULL)) {
6254 		/* user does not want the sndrcv ctl */
6255 		filling_sinfo = 0;
6256 	}
6257 	if (name) {
6258 		from = (struct sockaddr *)sockbuf;
6259 		fromlen = sizeof(sockbuf);
6260 		from->sa_len = 0;
6261 	} else {
6262 		from = NULL;
6263 		fromlen = 0;
6264 	}
6265 
6266 	error = sctp_sorecvmsg(so, uio,
6267 	    (struct mbuf **)NULL,
6268 	    from, fromlen, flag,
6269 	    (struct sctp_sndrcvinfo *)&sinfo,
6270 	    filling_sinfo);
6271 	if ((controlp) && (filling_sinfo)) {
6272 		/*
6273 		 * copy back the sinfo in a CMSG format note that the caller
6274 		 * has reponsibility for freeing the memory.
6275 		 */
6276 		if (filling_sinfo)
6277 			*controlp = sctp_build_ctl_cchunk(inp,
6278 			    controllen,
6279 			    (struct sctp_sndrcvinfo *)&sinfo);
6280 	}
6281 	if (name) {
6282 		/* copy back the address info */
6283 		if (from && from->sa_len) {
6284 			*name = sodupsockaddr(from, M_WAIT);
6285 		} else {
6286 			*name = NULL;
6287 		}
6288 	}
6289 	return (error);
6290 }
6291 
6292 
6293 
6294 
6295 
6296 
6297 
/*
 * Add a connectx() address list to an existing association.
 *
 * 'addr' points at 'totaddr' packed sockaddrs (IPv4 and/or IPv6).
 * Each recognized address is added to 'stcb' as a confirmed remote
 * address.  Returns the number of addresses added.  On failure of
 * sctp_add_remote_addr() the association is freed here and *error is
 * set to ENOBUFS; the caller must not touch stcb afterwards.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/*
		 * NOTE(review): for an unrecognized sa_family, 'incr' keeps
		 * its previous value (0 on the first pass), so 'sa' is not
		 * advanced and the same bytes get re-examined.  This relies
		 * on the caller having validated the list beforehand (e.g.
		 * via sctp_connectx_helper_find) -- confirm before changing.
		 */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6338 
6339 struct sctp_tcb *
6340 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6341     int *totaddr, int *num_v4, int *num_v6, int *error,
6342     int limit, int *bad_addr)
6343 {
6344 	struct sockaddr *sa;
6345 	struct sctp_tcb *stcb = NULL;
6346 	size_t incr, at, i;
6347 
6348 	at = incr = 0;
6349 	sa = addr;
6350 	*error = *num_v6 = *num_v4 = 0;
6351 	/* account and validate addresses */
6352 	for (i = 0; i < (size_t)*totaddr; i++) {
6353 		if (sa->sa_family == AF_INET) {
6354 			(*num_v4) += 1;
6355 			incr = sizeof(struct sockaddr_in);
6356 			if (sa->sa_len != incr) {
6357 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6358 				*error = EINVAL;
6359 				*bad_addr = 1;
6360 				return (NULL);
6361 			}
6362 		} else if (sa->sa_family == AF_INET6) {
6363 			struct sockaddr_in6 *sin6;
6364 
6365 			sin6 = (struct sockaddr_in6 *)sa;
6366 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6367 				/* Must be non-mapped for connectx */
6368 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6369 				*error = EINVAL;
6370 				*bad_addr = 1;
6371 				return (NULL);
6372 			}
6373 			(*num_v6) += 1;
6374 			incr = sizeof(struct sockaddr_in6);
6375 			if (sa->sa_len != incr) {
6376 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6377 				*error = EINVAL;
6378 				*bad_addr = 1;
6379 				return (NULL);
6380 			}
6381 		} else {
6382 			*totaddr = i;
6383 			/* we are done */
6384 			break;
6385 		}
6386 		SCTP_INP_INCR_REF(inp);
6387 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6388 		if (stcb != NULL) {
6389 			/* Already have or am bring up an association */
6390 			return (stcb);
6391 		} else {
6392 			SCTP_INP_DECR_REF(inp);
6393 		}
6394 		if ((at + incr) > (size_t)limit) {
6395 			*totaddr = i;
6396 			break;
6397 		}
6398 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6399 	}
6400 	return ((struct sctp_tcb *)NULL);
6401 }
6402 
6403 /*
6404  * sctp_bindx(ADD) for one address.
6405  * assumes all arguments are valid/checked by caller.
6406  */
6407 void
6408 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6409     struct sockaddr *sa, sctp_assoc_t assoc_id,
6410     uint32_t vrf_id, int *error, void *p)
6411 {
6412 	struct sockaddr *addr_touse;
6413 
6414 #ifdef INET6
6415 	struct sockaddr_in sin;
6416 
6417 #endif
6418 
6419 	/* see if we're bound all already! */
6420 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6421 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6422 		*error = EINVAL;
6423 		return;
6424 	}
6425 	addr_touse = sa;
6426 #if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
6427 	if (sa->sa_family == AF_INET6) {
6428 		struct sockaddr_in6 *sin6;
6429 
6430 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6431 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6432 			*error = EINVAL;
6433 			return;
6434 		}
6435 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6436 			/* can only bind v6 on PF_INET6 sockets */
6437 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6438 			*error = EINVAL;
6439 			return;
6440 		}
6441 		sin6 = (struct sockaddr_in6 *)addr_touse;
6442 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6443 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6444 			    SCTP_IPV6_V6ONLY(inp)) {
6445 				/* can't bind v4-mapped on PF_INET sockets */
6446 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6447 				*error = EINVAL;
6448 				return;
6449 			}
6450 			in6_sin6_2_sin(&sin, sin6);
6451 			addr_touse = (struct sockaddr *)&sin;
6452 		}
6453 	}
6454 #endif
6455 	if (sa->sa_family == AF_INET) {
6456 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6457 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6458 			*error = EINVAL;
6459 			return;
6460 		}
6461 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6462 		    SCTP_IPV6_V6ONLY(inp)) {
6463 			/* can't bind v4 on PF_INET sockets */
6464 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6465 			*error = EINVAL;
6466 			return;
6467 		}
6468 	}
6469 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6470 		if (p == NULL) {
6471 			/* Can't get proc for Net/Open BSD */
6472 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6473 			*error = EINVAL;
6474 			return;
6475 		}
6476 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6477 		return;
6478 	}
6479 	/*
6480 	 * No locks required here since bind and mgmt_ep_sa all do their own
6481 	 * locking. If we do something for the FIX: below we may need to
6482 	 * lock in that case.
6483 	 */
6484 	if (assoc_id == 0) {
6485 		/* add the address */
6486 		struct sctp_inpcb *lep;
6487 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6488 
6489 		/* validate the incoming port */
6490 		if ((lsin->sin_port != 0) &&
6491 		    (lsin->sin_port != inp->sctp_lport)) {
6492 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6493 			*error = EINVAL;
6494 			return;
6495 		} else {
6496 			/* user specified 0 port, set it to existing port */
6497 			lsin->sin_port = inp->sctp_lport;
6498 		}
6499 
6500 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6501 		if (lep != NULL) {
6502 			/*
6503 			 * We must decrement the refcount since we have the
6504 			 * ep already and are binding. No remove going on
6505 			 * here.
6506 			 */
6507 			SCTP_INP_DECR_REF(lep);
6508 		}
6509 		if (lep == inp) {
6510 			/* already bound to it.. ok */
6511 			return;
6512 		} else if (lep == NULL) {
6513 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6514 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6515 			    SCTP_ADD_IP_ADDRESS,
6516 			    vrf_id, NULL);
6517 		} else {
6518 			*error = EADDRINUSE;
6519 		}
6520 		if (*error)
6521 			return;
6522 	} else {
6523 		/*
6524 		 * FIX: decide whether we allow assoc based bindx
6525 		 */
6526 	}
6527 }
6528 
6529 /*
6530  * sctp_bindx(DELETE) for one address.
6531  * assumes all arguments are valid/checked by caller.
6532  */
6533 void
6534 sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
6535     struct sockaddr *sa, sctp_assoc_t assoc_id,
6536     uint32_t vrf_id, int *error)
6537 {
6538 	struct sockaddr *addr_touse;
6539 
6540 #ifdef INET6
6541 	struct sockaddr_in sin;
6542 
6543 #endif
6544 
6545 	/* see if we're bound all already! */
6546 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6547 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6548 		*error = EINVAL;
6549 		return;
6550 	}
6551 	addr_touse = sa;
6552 #if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
6553 	if (sa->sa_family == AF_INET6) {
6554 		struct sockaddr_in6 *sin6;
6555 
6556 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6557 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6558 			*error = EINVAL;
6559 			return;
6560 		}
6561 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6562 			/* can only bind v6 on PF_INET6 sockets */
6563 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6564 			*error = EINVAL;
6565 			return;
6566 		}
6567 		sin6 = (struct sockaddr_in6 *)addr_touse;
6568 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6569 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6570 			    SCTP_IPV6_V6ONLY(inp)) {
6571 				/* can't bind mapped-v4 on PF_INET sockets */
6572 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6573 				*error = EINVAL;
6574 				return;
6575 			}
6576 			in6_sin6_2_sin(&sin, sin6);
6577 			addr_touse = (struct sockaddr *)&sin;
6578 		}
6579 	}
6580 #endif
6581 	if (sa->sa_family == AF_INET) {
6582 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6583 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6584 			*error = EINVAL;
6585 			return;
6586 		}
6587 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6588 		    SCTP_IPV6_V6ONLY(inp)) {
6589 			/* can't bind v4 on PF_INET sockets */
6590 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6591 			*error = EINVAL;
6592 			return;
6593 		}
6594 	}
6595 	/*
6596 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6597 	 * below is ever changed we may need to lock before calling
6598 	 * association level binding.
6599 	 */
6600 	if (assoc_id == 0) {
6601 		/* delete the address */
6602 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6603 		    SCTP_DEL_IP_ADDRESS,
6604 		    vrf_id, NULL);
6605 	} else {
6606 		/*
6607 		 * FIX: decide whether we allow assoc based bindx
6608 		 */
6609 	}
6610 }
6611 
6612 /*
6613  * returns the valid local address count for an assoc, taking into account
6614  * all scoping rules
6615  */
6616 int
6617 sctp_local_addr_count(struct sctp_tcb *stcb)
6618 {
6619 	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
6620 	int ipv4_addr_legal, ipv6_addr_legal;
6621 	struct sctp_vrf *vrf;
6622 	struct sctp_ifn *sctp_ifn;
6623 	struct sctp_ifa *sctp_ifa;
6624 	int count = 0;
6625 
6626 	/* Turn on all the appropriate scopes */
6627 	loopback_scope = stcb->asoc.loopback_scope;
6628 	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
6629 	local_scope = stcb->asoc.local_scope;
6630 	site_scope = stcb->asoc.site_scope;
6631 	ipv4_addr_legal = ipv6_addr_legal = 0;
6632 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6633 		ipv6_addr_legal = 1;
6634 		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
6635 			ipv4_addr_legal = 1;
6636 		}
6637 	} else {
6638 		ipv4_addr_legal = 1;
6639 	}
6640 
6641 	SCTP_IPI_ADDR_RLOCK();
6642 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6643 	if (vrf == NULL) {
6644 		/* no vrf, no addresses */
6645 		SCTP_IPI_ADDR_RUNLOCK();
6646 		return (0);
6647 	}
6648 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6649 		/*
6650 		 * bound all case: go through all ifns on the vrf
6651 		 */
6652 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6653 			if ((loopback_scope == 0) &&
6654 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6655 				continue;
6656 			}
6657 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6658 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6659 					continue;
6660 				switch (sctp_ifa->address.sa.sa_family) {
6661 				case AF_INET:
6662 					if (ipv4_addr_legal) {
6663 						struct sockaddr_in *sin;
6664 
6665 						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
6666 						if (sin->sin_addr.s_addr == 0) {
6667 							/*
6668 							 * skip unspecified
6669 							 * addrs
6670 							 */
6671 							continue;
6672 						}
6673 						if ((ipv4_local_scope == 0) &&
6674 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6675 							continue;
6676 						}
6677 						/* count this one */
6678 						count++;
6679 					} else {
6680 						continue;
6681 					}
6682 					break;
6683 #ifdef INET6
6684 				case AF_INET6:
6685 					if (ipv6_addr_legal) {
6686 						struct sockaddr_in6 *sin6;
6687 
6688 						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
6689 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6690 							continue;
6691 						}
6692 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6693 							if (local_scope == 0)
6694 								continue;
6695 							if (sin6->sin6_scope_id == 0) {
6696 								if (sa6_recoverscope(sin6) != 0)
6697 									/*
6698 									 *
6699 									 * bad
6700 									 *
6701 									 * li
6702 									 * nk
6703 									 *
6704 									 * loc
6705 									 * al
6706 									 *
6707 									 * add
6708 									 * re
6709 									 * ss
6710 									 * */
6711 									continue;
6712 							}
6713 						}
6714 						if ((site_scope == 0) &&
6715 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6716 							continue;
6717 						}
6718 						/* count this one */
6719 						count++;
6720 					}
6721 					break;
6722 #endif
6723 				default:
6724 					/* TSNH */
6725 					break;
6726 				}
6727 			}
6728 		}
6729 	} else {
6730 		/*
6731 		 * subset bound case
6732 		 */
6733 		struct sctp_laddr *laddr;
6734 
6735 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6736 		    sctp_nxt_addr) {
6737 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6738 				continue;
6739 			}
6740 			/* count this one */
6741 			count++;
6742 		}
6743 	}
6744 	SCTP_IPI_ADDR_RUNLOCK();
6745 	return (count);
6746 }
6747 
6748 #if defined(SCTP_LOCAL_TRACE_BUF)
6749 
/*
 * Record one entry in the global SCTP trace ring buffer.
 *
 * A slot is reserved lock-free: the CAS loop retries until this thread
 * successfully advances sctp_log.index, so concurrent tracers each get
 * a distinct slot.  The entry fields are then filled without a lock,
 * so a reader racing with a writer may see a partially written entry
 * (acceptable for a debug trace facility).
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* reserve a slot; the next index wraps to 1 once the buffer fills */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* an out-of-range reserved index maps to slot 0 on wrap */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6775 
6776 #endif
6777 /* We will need to add support
6778  * to bind the ports and such here
6779  * so we can do UDP tunneling. In
6780  * the mean-time, we return error
6781  */
6782 #include <netinet/udp.h>
6783 #include <netinet/udp_var.h>
6784 #include <sys/proc.h>
6785 #ifdef INET6
6786 #include <netinet6/sctp6_var.h>
6787 #endif
6788 
6789 static void
6790 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6791 {
6792 	struct ip *iph;
6793 	struct mbuf *sp, *last;
6794 	struct udphdr *uhdr;
6795 	uint16_t port = 0, len;
6796 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6797 
6798 	/*
6799 	 * Split out the mbuf chain. Leave the IP header in m, place the
6800 	 * rest in the sp.
6801 	 */
6802 	if ((m->m_flags & M_PKTHDR) == 0) {
6803 		/* Can't handle one that is not a pkt hdr */
6804 		goto out;
6805 	}
6806 	/* pull the src port */
6807 	iph = mtod(m, struct ip *);
6808 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6809 
6810 	port = uhdr->uh_sport;
6811 	sp = m_split(m, off, M_DONTWAIT);
6812 	if (sp == NULL) {
6813 		/* Gak, drop packet, we can't do a split */
6814 		goto out;
6815 	}
6816 	if (sp->m_pkthdr.len < header_size) {
6817 		/* Gak, packet can't have an SCTP header in it - to small */
6818 		m_freem(sp);
6819 		goto out;
6820 	}
6821 	/* ok now pull up the UDP header and SCTP header together */
6822 	sp = m_pullup(sp, header_size);
6823 	if (sp == NULL) {
6824 		/* Gak pullup failed */
6825 		goto out;
6826 	}
6827 	/* trim out the UDP header */
6828 	m_adj(sp, sizeof(struct udphdr));
6829 
6830 	/* Now reconstruct the mbuf chain */
6831 	/* 1) find last one */
6832 	last = m;
6833 	while (last->m_next != NULL) {
6834 		last = last->m_next;
6835 	}
6836 	last->m_next = sp;
6837 	m->m_pkthdr.len += sp->m_pkthdr.len;
6838 	last = m;
6839 	while (last != NULL) {
6840 		last = last->m_next;
6841 	}
6842 	/* Now its ready for sctp_input or sctp6_input */
6843 	iph = mtod(m, struct ip *);
6844 	switch (iph->ip_v) {
6845 	case IPVERSION:
6846 		{
6847 			/* its IPv4 */
6848 			len = SCTP_GET_IPV4_LENGTH(iph);
6849 			len -= sizeof(struct udphdr);
6850 			SCTP_GET_IPV4_LENGTH(iph) = len;
6851 			sctp_input_with_port(m, off, port);
6852 			break;
6853 		}
6854 #ifdef INET6
6855 	case IPV6_VERSION >> 4:
6856 		{
6857 			/* its IPv6 - NOT supported */
6858 			goto out;
6859 			break;
6860 
6861 		}
6862 #endif
6863 	default:
6864 		{
6865 			m_freem(m);
6866 			break;
6867 		}
6868 	}
6869 	return;
6870 out:
6871 	m_freem(m);
6872 }
6873 
6874 void
6875 sctp_over_udp_stop(void)
6876 {
6877 	struct socket *sop;
6878 
6879 	/*
6880 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6881 	 * for writting!
6882 	 */
6883 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6884 		/* Nothing to do */
6885 		return;
6886 	}
6887 	sop = SCTP_BASE_INFO(udp_tun_socket);
6888 	soclose(sop);
6889 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6890 }
6891 int
6892 sctp_over_udp_start(void)
6893 {
6894 	uint16_t port;
6895 	int ret;
6896 	struct sockaddr_in sin;
6897 	struct socket *sop = NULL;
6898 	struct thread *th;
6899 	struct ucred *cred;
6900 
6901 	/*
6902 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6903 	 * for writting!
6904 	 */
6905 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6906 	if (port == 0) {
6907 		/* Must have a port set */
6908 		return (EINVAL);
6909 	}
6910 	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
6911 		/* Already running -- must stop first */
6912 		return (EALREADY);
6913 	}
6914 	th = curthread;
6915 	cred = th->td_ucred;
6916 	if ((ret = socreate(PF_INET, &sop,
6917 	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
6918 		return (ret);
6919 	}
6920 	SCTP_BASE_INFO(udp_tun_socket) = sop;
6921 	/* call the special UDP hook */
6922 	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
6923 	if (ret) {
6924 		goto exit_stage_left;
6925 	}
6926 	/* Ok we have a socket, bind it to the port */
6927 	memset(&sin, 0, sizeof(sin));
6928 	sin.sin_len = sizeof(sin);
6929 	sin.sin_family = AF_INET;
6930 	sin.sin_port = htons(port);
6931 	ret = sobind(sop, (struct sockaddr *)&sin, th);
6932 	if (ret) {
6933 		/* Close up we cant get the port */
6934 exit_stage_left:
6935 		sctp_over_udp_stop();
6936 		return (ret);
6937 	}
6938 	/*
6939 	 * Ok we should now get UDP packets directly to our input routine
6940 	 * sctp_recv_upd_tunneled_packet().
6941 	 */
6942 	return (0);
6943 }
6944