xref: /freebsd/sys/netinet/sctputil.c (revision dc60165b73e4c4d829a2cb9fed5cce585e93d9a9)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_cc_functions.h>
51 
52 #define NUMBER_OF_MTU_SIZES 18
53 
54 
55 #if defined(__Windows__) && !defined(SCTP_LOCAL_TRACE_BUF)
56 #include "eventrace_netinet.h"
57 #include "sctputil.tmh"		/* this is the file that will be auto
58 				 * generated */
59 #else
60 #ifndef KTR_SCTP
61 #define KTR_SCTP KTR_SUBSYS
62 #endif
63 #endif
64 
65 void
66 sctp_sblog(struct sockbuf *sb,
67     struct sctp_tcb *stcb, int from, int incr)
68 {
69 	struct sctp_cwnd_log sctp_clog;
70 
71 	sctp_clog.x.sb.stcb = stcb;
72 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
73 	if (stcb)
74 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
75 	else
76 		sctp_clog.x.sb.stcb_sbcc = 0;
77 	sctp_clog.x.sb.incr = incr;
78 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
79 	    SCTP_LOG_EVENT_SB,
80 	    from,
81 	    sctp_clog.x.misc.log1,
82 	    sctp_clog.x.misc.log2,
83 	    sctp_clog.x.misc.log3,
84 	    sctp_clog.x.misc.log4);
85 }
86 
87 void
88 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
89 {
90 	struct sctp_cwnd_log sctp_clog;
91 
92 	sctp_clog.x.close.inp = (void *)inp;
93 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
94 	if (stcb) {
95 		sctp_clog.x.close.stcb = (void *)stcb;
96 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
97 	} else {
98 		sctp_clog.x.close.stcb = 0;
99 		sctp_clog.x.close.state = 0;
100 	}
101 	sctp_clog.x.close.loc = loc;
102 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
103 	    SCTP_LOG_EVENT_CLOSE,
104 	    0,
105 	    sctp_clog.x.misc.log1,
106 	    sctp_clog.x.misc.log2,
107 	    sctp_clog.x.misc.log3,
108 	    sctp_clog.x.misc.log4);
109 }
110 
111 
112 void
113 rto_logging(struct sctp_nets *net, int from)
114 {
115 	struct sctp_cwnd_log sctp_clog;
116 
117 	memset(&sctp_clog, 0, sizeof(sctp_clog));
118 	sctp_clog.x.rto.net = (void *)net;
119 	sctp_clog.x.rto.rtt = net->prev_rtt;
120 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
121 	    SCTP_LOG_EVENT_RTT,
122 	    from,
123 	    sctp_clog.x.misc.log1,
124 	    sctp_clog.x.misc.log2,
125 	    sctp_clog.x.misc.log3,
126 	    sctp_clog.x.misc.log4);
127 
128 }
129 
130 void
131 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
132 {
133 	struct sctp_cwnd_log sctp_clog;
134 
135 	sctp_clog.x.strlog.stcb = stcb;
136 	sctp_clog.x.strlog.n_tsn = tsn;
137 	sctp_clog.x.strlog.n_sseq = sseq;
138 	sctp_clog.x.strlog.e_tsn = 0;
139 	sctp_clog.x.strlog.e_sseq = 0;
140 	sctp_clog.x.strlog.strm = stream;
141 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
142 	    SCTP_LOG_EVENT_STRM,
143 	    from,
144 	    sctp_clog.x.misc.log1,
145 	    sctp_clog.x.misc.log2,
146 	    sctp_clog.x.misc.log3,
147 	    sctp_clog.x.misc.log4);
148 
149 }
150 
151 void
152 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
153 {
154 	struct sctp_cwnd_log sctp_clog;
155 
156 	sctp_clog.x.nagle.stcb = (void *)stcb;
157 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
158 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
159 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
160 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
161 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
162 	    SCTP_LOG_EVENT_NAGLE,
163 	    action,
164 	    sctp_clog.x.misc.log1,
165 	    sctp_clog.x.misc.log2,
166 	    sctp_clog.x.misc.log3,
167 	    sctp_clog.x.misc.log4);
168 }
169 
170 
171 void
172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
173 {
174 	struct sctp_cwnd_log sctp_clog;
175 
176 	sctp_clog.x.sack.cumack = cumack;
177 	sctp_clog.x.sack.oldcumack = old_cumack;
178 	sctp_clog.x.sack.tsn = tsn;
179 	sctp_clog.x.sack.numGaps = gaps;
180 	sctp_clog.x.sack.numDups = dups;
181 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
182 	    SCTP_LOG_EVENT_SACK,
183 	    from,
184 	    sctp_clog.x.misc.log1,
185 	    sctp_clog.x.misc.log2,
186 	    sctp_clog.x.misc.log3,
187 	    sctp_clog.x.misc.log4);
188 }
189 
190 void
191 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
192 {
193 	struct sctp_cwnd_log sctp_clog;
194 
195 	memset(&sctp_clog, 0, sizeof(sctp_clog));
196 	sctp_clog.x.map.base = map;
197 	sctp_clog.x.map.cum = cum;
198 	sctp_clog.x.map.high = high;
199 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
200 	    SCTP_LOG_EVENT_MAP,
201 	    from,
202 	    sctp_clog.x.misc.log1,
203 	    sctp_clog.x.misc.log2,
204 	    sctp_clog.x.misc.log3,
205 	    sctp_clog.x.misc.log4);
206 }
207 
208 void
209 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
210     int from)
211 {
212 	struct sctp_cwnd_log sctp_clog;
213 
214 	memset(&sctp_clog, 0, sizeof(sctp_clog));
215 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
216 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
217 	sctp_clog.x.fr.tsn = tsn;
218 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
219 	    SCTP_LOG_EVENT_FR,
220 	    from,
221 	    sctp_clog.x.misc.log1,
222 	    sctp_clog.x.misc.log2,
223 	    sctp_clog.x.misc.log3,
224 	    sctp_clog.x.misc.log4);
225 
226 }
227 
228 
229 void
230 sctp_log_mb(struct mbuf *m, int from)
231 {
232 	struct sctp_cwnd_log sctp_clog;
233 
234 	sctp_clog.x.mb.mp = m;
235 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
236 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
237 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
238 	if (SCTP_BUF_IS_EXTENDED(m)) {
239 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
240 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
241 	} else {
242 		sctp_clog.x.mb.ext = 0;
243 		sctp_clog.x.mb.refcnt = 0;
244 	}
245 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
246 	    SCTP_LOG_EVENT_MBUF,
247 	    from,
248 	    sctp_clog.x.misc.log1,
249 	    sctp_clog.x.misc.log2,
250 	    sctp_clog.x.misc.log3,
251 	    sctp_clog.x.misc.log4);
252 }
253 
254 
255 void
256 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
257     int from)
258 {
259 	struct sctp_cwnd_log sctp_clog;
260 
261 	if (control == NULL) {
262 		SCTP_PRINTF("Gak log of NULL?\n");
263 		return;
264 	}
265 	sctp_clog.x.strlog.stcb = control->stcb;
266 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
267 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
268 	sctp_clog.x.strlog.strm = control->sinfo_stream;
269 	if (poschk != NULL) {
270 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
271 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
272 	} else {
273 		sctp_clog.x.strlog.e_tsn = 0;
274 		sctp_clog.x.strlog.e_sseq = 0;
275 	}
276 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
277 	    SCTP_LOG_EVENT_STRM,
278 	    from,
279 	    sctp_clog.x.misc.log1,
280 	    sctp_clog.x.misc.log2,
281 	    sctp_clog.x.misc.log3,
282 	    sctp_clog.x.misc.log4);
283 
284 }
285 
286 void
287 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
288 {
289 	struct sctp_cwnd_log sctp_clog;
290 
291 	sctp_clog.x.cwnd.net = net;
292 	if (stcb->asoc.send_queue_cnt > 255)
293 		sctp_clog.x.cwnd.cnt_in_send = 255;
294 	else
295 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
296 	if (stcb->asoc.stream_queue_cnt > 255)
297 		sctp_clog.x.cwnd.cnt_in_str = 255;
298 	else
299 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
300 
301 	if (net) {
302 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
303 		sctp_clog.x.cwnd.inflight = net->flight_size;
304 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
305 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
306 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
307 	}
308 	if (SCTP_CWNDLOG_PRESEND == from) {
309 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
310 	}
311 	sctp_clog.x.cwnd.cwnd_augment = augment;
312 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
313 	    SCTP_LOG_EVENT_CWND,
314 	    from,
315 	    sctp_clog.x.misc.log1,
316 	    sctp_clog.x.misc.log2,
317 	    sctp_clog.x.misc.log3,
318 	    sctp_clog.x.misc.log4);
319 
320 }
321 
322 void
323 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
324 {
325 	struct sctp_cwnd_log sctp_clog;
326 
327 	memset(&sctp_clog, 0, sizeof(sctp_clog));
328 	if (inp) {
329 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
330 
331 	} else {
332 		sctp_clog.x.lock.sock = (void *)NULL;
333 	}
334 	sctp_clog.x.lock.inp = (void *)inp;
335 	if (stcb) {
336 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
337 	} else {
338 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
339 	}
340 	if (inp) {
341 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
342 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
343 	} else {
344 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
345 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
346 	}
347 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
348 	if (inp->sctp_socket) {
349 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
350 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
351 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
352 	} else {
353 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
354 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
355 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
356 	}
357 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
358 	    SCTP_LOG_LOCK_EVENT,
359 	    from,
360 	    sctp_clog.x.misc.log1,
361 	    sctp_clog.x.misc.log2,
362 	    sctp_clog.x.misc.log3,
363 	    sctp_clog.x.misc.log4);
364 
365 }
366 
367 void
368 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
369 {
370 	struct sctp_cwnd_log sctp_clog;
371 
372 	memset(&sctp_clog, 0, sizeof(sctp_clog));
373 	sctp_clog.x.cwnd.net = net;
374 	sctp_clog.x.cwnd.cwnd_new_value = error;
375 	sctp_clog.x.cwnd.inflight = net->flight_size;
376 	sctp_clog.x.cwnd.cwnd_augment = burst;
377 	if (stcb->asoc.send_queue_cnt > 255)
378 		sctp_clog.x.cwnd.cnt_in_send = 255;
379 	else
380 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
381 	if (stcb->asoc.stream_queue_cnt > 255)
382 		sctp_clog.x.cwnd.cnt_in_str = 255;
383 	else
384 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
385 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
386 	    SCTP_LOG_EVENT_MAXBURST,
387 	    from,
388 	    sctp_clog.x.misc.log1,
389 	    sctp_clog.x.misc.log2,
390 	    sctp_clog.x.misc.log3,
391 	    sctp_clog.x.misc.log4);
392 
393 }
394 
395 void
396 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
397 {
398 	struct sctp_cwnd_log sctp_clog;
399 
400 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
401 	sctp_clog.x.rwnd.send_size = snd_size;
402 	sctp_clog.x.rwnd.overhead = overhead;
403 	sctp_clog.x.rwnd.new_rwnd = 0;
404 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
405 	    SCTP_LOG_EVENT_RWND,
406 	    from,
407 	    sctp_clog.x.misc.log1,
408 	    sctp_clog.x.misc.log2,
409 	    sctp_clog.x.misc.log3,
410 	    sctp_clog.x.misc.log4);
411 }
412 
413 void
414 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
415 {
416 	struct sctp_cwnd_log sctp_clog;
417 
418 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
419 	sctp_clog.x.rwnd.send_size = flight_size;
420 	sctp_clog.x.rwnd.overhead = overhead;
421 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
422 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
423 	    SCTP_LOG_EVENT_RWND,
424 	    from,
425 	    sctp_clog.x.misc.log1,
426 	    sctp_clog.x.misc.log2,
427 	    sctp_clog.x.misc.log3,
428 	    sctp_clog.x.misc.log4);
429 }
430 
431 void
432 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
433 {
434 	struct sctp_cwnd_log sctp_clog;
435 
436 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
437 	sctp_clog.x.mbcnt.size_change = book;
438 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
439 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
440 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
441 	    SCTP_LOG_EVENT_MBCNT,
442 	    from,
443 	    sctp_clog.x.misc.log1,
444 	    sctp_clog.x.misc.log2,
445 	    sctp_clog.x.misc.log3,
446 	    sctp_clog.x.misc.log4);
447 
448 }
449 
/*
 * Log four caller-supplied 32-bit values directly to the KTR trace
 * stream under SCTP_LOG_MISC_EVENT; "from" identifies the call site.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
458 
459 void
460 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
461 {
462 	struct sctp_cwnd_log sctp_clog;
463 
464 	sctp_clog.x.wake.stcb = (void *)stcb;
465 	sctp_clog.x.wake.wake_cnt = wake_cnt;
466 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469 
470 	if (stcb->asoc.stream_queue_cnt < 0xff)
471 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472 	else
473 		sctp_clog.x.wake.stream_qcnt = 0xff;
474 
475 	if (stcb->asoc.chunks_on_out_queue < 0xff)
476 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477 	else
478 		sctp_clog.x.wake.chunks_on_oque = 0xff;
479 
480 	sctp_clog.x.wake.sctpflags = 0;
481 	/* set in the defered mode stuff */
482 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483 		sctp_clog.x.wake.sctpflags |= 1;
484 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485 		sctp_clog.x.wake.sctpflags |= 2;
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487 		sctp_clog.x.wake.sctpflags |= 4;
488 	/* what about the sb */
489 	if (stcb->sctp_socket) {
490 		struct socket *so = stcb->sctp_socket;
491 
492 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493 	} else {
494 		sctp_clog.x.wake.sbflags = 0xff;
495 	}
496 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497 	    SCTP_LOG_EVENT_WAKE,
498 	    from,
499 	    sctp_clog.x.misc.log1,
500 	    sctp_clog.x.misc.log2,
501 	    sctp_clog.x.misc.log3,
502 	    sctp_clog.x.misc.log4);
503 
504 }
505 
506 void
507 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
508 {
509 	struct sctp_cwnd_log sctp_clog;
510 
511 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
512 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
513 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
514 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
515 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
516 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
517 	sctp_clog.x.blk.sndlen = sendlen;
518 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
519 	    SCTP_LOG_EVENT_BLOCK,
520 	    from,
521 	    sctp_clog.x.misc.log1,
522 	    sctp_clog.x.misc.log2,
523 	    sctp_clog.x.misc.log3,
524 	    sctp_clog.x.misc.log4);
525 
526 }
527 
/*
 * Copy the trace log out to userland.  Currently a no-op returning
 * success — the KTR facility (ktrdump) is used to extract the log
 * instead.  Kept for ABI/sysctl compatibility.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
534 
535 #ifdef SCTP_AUDITING_ENABLED
/* Ring buffer of (event, detail) byte pairs recorded by the auditors. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next write slot in sctp_audit_data[]; wraps to 0 at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
538 
539 static
540 void
541 sctp_print_audit_report(void)
542 {
543 	int i;
544 	int cnt;
545 
546 	cnt = 0;
547 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
548 		if ((sctp_audit_data[i][0] == 0xe0) &&
549 		    (sctp_audit_data[i][1] == 0x01)) {
550 			cnt = 0;
551 			SCTP_PRINTF("\n");
552 		} else if (sctp_audit_data[i][0] == 0xf0) {
553 			cnt = 0;
554 			SCTP_PRINTF("\n");
555 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
556 		    (sctp_audit_data[i][1] == 0x01)) {
557 			SCTP_PRINTF("\n");
558 			cnt = 0;
559 		}
560 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
561 		    (uint32_t) sctp_audit_data[i][1]);
562 		cnt++;
563 		if ((cnt % 14) == 0)
564 			SCTP_PRINTF("\n");
565 	}
566 	for (i = 0; i < sctp_audit_indx; i++) {
567 		if ((sctp_audit_data[i][0] == 0xe0) &&
568 		    (sctp_audit_data[i][1] == 0x01)) {
569 			cnt = 0;
570 			SCTP_PRINTF("\n");
571 		} else if (sctp_audit_data[i][0] == 0xf0) {
572 			cnt = 0;
573 			SCTP_PRINTF("\n");
574 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
575 		    (sctp_audit_data[i][1] == 0x01)) {
576 			SCTP_PRINTF("\n");
577 			cnt = 0;
578 		}
579 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
580 		    (uint32_t) sctp_audit_data[i][1]);
581 		cnt++;
582 		if ((cnt % 14) == 0)
583 			SCTP_PRINTF("\n");
584 	}
585 	SCTP_PRINTF("\n");
586 }
587 
588 void
589 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
590     struct sctp_nets *net)
591 {
592 	int resend_cnt, tot_out, rep, tot_book_cnt;
593 	struct sctp_nets *lnet;
594 	struct sctp_tmit_chunk *chk;
595 
596 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
597 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
598 	sctp_audit_indx++;
599 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
600 		sctp_audit_indx = 0;
601 	}
602 	if (inp == NULL) {
603 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
604 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
605 		sctp_audit_indx++;
606 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
607 			sctp_audit_indx = 0;
608 		}
609 		return;
610 	}
611 	if (stcb == NULL) {
612 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
613 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
614 		sctp_audit_indx++;
615 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
616 			sctp_audit_indx = 0;
617 		}
618 		return;
619 	}
620 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
621 	sctp_audit_data[sctp_audit_indx][1] =
622 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
623 	sctp_audit_indx++;
624 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
625 		sctp_audit_indx = 0;
626 	}
627 	rep = 0;
628 	tot_book_cnt = 0;
629 	resend_cnt = tot_out = 0;
630 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
631 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
632 			resend_cnt++;
633 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
634 			tot_out += chk->book_size;
635 			tot_book_cnt++;
636 		}
637 	}
638 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
639 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
640 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
641 		sctp_audit_indx++;
642 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
643 			sctp_audit_indx = 0;
644 		}
645 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
646 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
647 		rep = 1;
648 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
649 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
650 		sctp_audit_data[sctp_audit_indx][1] =
651 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
652 		sctp_audit_indx++;
653 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
654 			sctp_audit_indx = 0;
655 		}
656 	}
657 	if (tot_out != stcb->asoc.total_flight) {
658 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
659 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
660 		sctp_audit_indx++;
661 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
662 			sctp_audit_indx = 0;
663 		}
664 		rep = 1;
665 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
666 		    (int)stcb->asoc.total_flight);
667 		stcb->asoc.total_flight = tot_out;
668 	}
669 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
670 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
671 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
672 		sctp_audit_indx++;
673 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
674 			sctp_audit_indx = 0;
675 		}
676 		rep = 1;
677 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
678 
679 		stcb->asoc.total_flight_count = tot_book_cnt;
680 	}
681 	tot_out = 0;
682 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
683 		tot_out += lnet->flight_size;
684 	}
685 	if (tot_out != stcb->asoc.total_flight) {
686 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
687 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
688 		sctp_audit_indx++;
689 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
690 			sctp_audit_indx = 0;
691 		}
692 		rep = 1;
693 		SCTP_PRINTF("real flight:%d net total was %d\n",
694 		    stcb->asoc.total_flight, tot_out);
695 		/* now corrective action */
696 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
697 
698 			tot_out = 0;
699 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
700 				if ((chk->whoTo == lnet) &&
701 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
702 					tot_out += chk->book_size;
703 				}
704 			}
705 			if (lnet->flight_size != tot_out) {
706 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
707 				    (uint32_t) lnet, lnet->flight_size,
708 				    tot_out);
709 				lnet->flight_size = tot_out;
710 			}
711 		}
712 	}
713 	if (rep) {
714 		sctp_print_audit_report();
715 	}
716 }
717 
718 void
719 sctp_audit_log(uint8_t ev, uint8_t fd)
720 {
721 
722 	sctp_audit_data[sctp_audit_indx][0] = ev;
723 	sctp_audit_data[sctp_audit_indx][1] = fd;
724 	sctp_audit_indx++;
725 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
726 		sctp_audit_indx = 0;
727 	}
728 }
729 
730 #endif
731 
/*
 * A list of sizes based on typical MTUs, used only if the next hop size
 * is not returned.  Must stay sorted ascending and contain exactly
 * NUMBER_OF_MTU_SIZES (18) entries; find_next_best_mtu() relies on
 * both properties when scanning for the largest entry below a given
 * datagram size.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
756 
757 void
758 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
759 {
760 	struct sctp_association *asoc;
761 	struct sctp_nets *net;
762 
763 	asoc = &stcb->asoc;
764 
765 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
766 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
767 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
768 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
769 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
770 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
771 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
772 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
773 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
774 	}
775 }
776 
777 int
778 find_next_best_mtu(int totsz)
779 {
780 	int i, perfer;
781 
782 	/*
783 	 * if we are in here we must find the next best fit based on the
784 	 * size of the dg that failed to be sent.
785 	 */
786 	perfer = 0;
787 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
788 		if (totsz < sctp_mtu_sizes[i]) {
789 			perfer = i - 1;
790 			if (perfer < 0)
791 				perfer = 0;
792 			break;
793 		}
794 	}
795 	return (sctp_mtu_sizes[perfer]);
796 }
797 
/*
 * Refill the endpoint's random store.
 *
 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
 * our counter. The result becomes our good random numbers and we
 * then setup to give these out. Note that we do no locking to
 * protect this. This is ok, since if competing folks call this we
 * will get more gobbled gook in the random store which is what we
 * want. There is a danger that two guys will use the same random
 * numbers, but thats ok too since that is random as well :->
 *
 * NOTE(review): statement order is deliberate for this lock-free
 * design — store_at is reset before the store is rewritten and the
 * counter is bumped only afterwards; do not reorder.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/* Rewind readers to the start of the store. */
	m->store_at = 0;
	/* HMAC(random_numbers, counter) becomes the new store contents. */
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* Advance the counter so the next refill produces fresh bytes. */
	m->random_counter++;
}
816 
/*
 * Hand out the next 4 bytes of the endpoint's random store as an
 * initial TSN.
 *
 * A true implementation should use random selection process to get
 * the initial stream sequence number, using RFC1750 as a good
 * guideline
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	/* Debug mode: hand out a simple incrementing sequence instead. */
	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before running past the usable end of the store. */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* Lock-free claim of 4 bytes at store_at; retry if we raced. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/* Read the 4 claimed bytes as a 32-bit value. */
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
854 
855 uint32_t
856 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
857 {
858 	u_long x, not_done;
859 	struct timeval now;
860 
861 	(void)SCTP_GETTIME_TIMEVAL(&now);
862 	not_done = 1;
863 	while (not_done) {
864 		x = sctp_select_initial_TSN(&inp->sctp_ep);
865 		if (x == 0) {
866 			/* we never use 0 */
867 			continue;
868 		}
869 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
870 			not_done = 0;
871 		}
872 	}
873 	return (x);
874 }
875 
876 int
877 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
878     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
879 {
880 	struct sctp_association *asoc;
881 
882 	/*
883 	 * Anything set to zero is taken care of by the allocation routine's
884 	 * bzero
885 	 */
886 
887 	/*
888 	 * Up front select what scoping to apply on addresses I tell my peer
889 	 * Not sure what to do with these right now, we will need to come up
890 	 * with a way to set them. We may need to pass them through from the
891 	 * caller in the sctp_aloc_assoc() function.
892 	 */
893 	int i;
894 
895 	asoc = &stcb->asoc;
896 	/* init all variables to a known value. */
897 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
898 	asoc->max_burst = m->sctp_ep.max_burst;
899 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
900 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
901 	asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
902 	/* EY Init nr_sack variable */
903 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
904 	/* JRS 5/21/07 - Init CMT PF variables */
905 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
906 	asoc->sctp_frag_point = m->sctp_frag_point;
907 #ifdef INET
908 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
909 #else
910 	asoc->default_tos = 0;
911 #endif
912 
913 #ifdef INET6
914 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
915 #else
916 	asoc->default_flowlabel = 0;
917 #endif
918 	asoc->sb_send_resv = 0;
919 	if (override_tag) {
920 #ifdef MICHAELS_EXPERIMENT
921 		if (sctp_is_in_timewait(override_tag, stcb->sctp_ep->sctp_lport, stcb->rport)) {
922 			/*
923 			 * It must be in the time-wait hash, we put it there
924 			 * when we aloc one. If not the peer is playing
925 			 * games.
926 			 */
927 			asoc->my_vtag = override_tag;
928 		} else {
929 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
930 #ifdef INVARIANTS
931 			panic("Huh is_in_timewait fails");
932 #endif
933 			return (ENOMEM);
934 		}
935 #else
936 		asoc->my_vtag = override_tag;
937 #endif
938 	} else {
939 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
940 	}
941 	/* Get the nonce tags */
942 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
943 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
944 	asoc->vrf_id = vrf_id;
945 
946 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
947 		asoc->hb_is_disabled = 1;
948 	else
949 		asoc->hb_is_disabled = 0;
950 
951 #ifdef SCTP_ASOCLOG_OF_TSNS
952 	asoc->tsn_in_at = 0;
953 	asoc->tsn_out_at = 0;
954 	asoc->tsn_in_wrapped = 0;
955 	asoc->tsn_out_wrapped = 0;
956 	asoc->cumack_log_at = 0;
957 	asoc->cumack_log_atsnt = 0;
958 #endif
959 #ifdef SCTP_FS_SPEC_LOG
960 	asoc->fs_index = 0;
961 #endif
962 	asoc->refcnt = 0;
963 	asoc->assoc_up_sent = 0;
964 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
965 	    sctp_select_initial_TSN(&m->sctp_ep);
966 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
967 	/* we are optimisitic here */
968 	asoc->peer_supports_pktdrop = 1;
969 	asoc->peer_supports_nat = 0;
970 	asoc->sent_queue_retran_cnt = 0;
971 
972 	/* for CMT */
973 	asoc->last_net_data_came_from = NULL;
974 
975 	/* This will need to be adjusted */
976 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
977 	asoc->last_acked_seq = asoc->init_seq_number - 1;
978 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
979 	asoc->asconf_seq_in = asoc->last_acked_seq;
980 
981 	/* here we are different, we hold the next one we expect */
982 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
983 
984 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
985 	asoc->initial_rto = m->sctp_ep.initial_rto;
986 
987 	asoc->max_init_times = m->sctp_ep.max_init_times;
988 	asoc->max_send_times = m->sctp_ep.max_send_times;
989 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
990 	asoc->free_chunk_cnt = 0;
991 
992 	asoc->iam_blocking = 0;
993 	/* ECN Nonce initialization */
994 	asoc->context = m->sctp_context;
995 	asoc->def_send = m->def_send;
996 	asoc->ecn_nonce_allowed = 0;
997 	asoc->receiver_nonce_sum = 1;
998 	asoc->nonce_sum_expect_base = 1;
999 	asoc->nonce_sum_check = 1;
1000 	asoc->nonce_resync_tsn = 0;
1001 	asoc->nonce_wait_for_ecne = 0;
1002 	asoc->nonce_wait_tsn = 0;
1003 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1004 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
1005 	asoc->pr_sctp_cnt = 0;
1006 	asoc->total_output_queue_size = 0;
1007 
1008 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1009 		struct in6pcb *inp6;
1010 
1011 		/* Its a V6 socket */
1012 		inp6 = (struct in6pcb *)m;
1013 		asoc->ipv6_addr_legal = 1;
1014 		/* Now look at the binding flag to see if V4 will be legal */
1015 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1016 			asoc->ipv4_addr_legal = 1;
1017 		} else {
1018 			/* V4 addresses are NOT legal on the association */
1019 			asoc->ipv4_addr_legal = 0;
1020 		}
1021 	} else {
1022 		/* Its a V4 socket, no - V6 */
1023 		asoc->ipv4_addr_legal = 1;
1024 		asoc->ipv6_addr_legal = 0;
1025 	}
1026 
1027 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1028 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1029 
1030 	asoc->smallest_mtu = m->sctp_frag_point;
1031 #ifdef SCTP_PRINT_FOR_B_AND_M
1032 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1033 	    asoc->smallest_mtu);
1034 #endif
1035 	asoc->minrto = m->sctp_ep.sctp_minrto;
1036 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1037 
1038 	asoc->locked_on_sending = NULL;
1039 	asoc->stream_locked_on = 0;
1040 	asoc->ecn_echo_cnt_onq = 0;
1041 	asoc->stream_locked = 0;
1042 
1043 	asoc->send_sack = 1;
1044 
1045 	LIST_INIT(&asoc->sctp_restricted_addrs);
1046 
1047 	TAILQ_INIT(&asoc->nets);
1048 	TAILQ_INIT(&asoc->pending_reply_queue);
1049 	TAILQ_INIT(&asoc->asconf_ack_sent);
1050 	/* Setup to fill the hb random cache at first HB */
1051 	asoc->hb_random_idx = 4;
1052 
1053 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1054 
1055 	/*
1056 	 * JRS - Pick the default congestion control module based on the
1057 	 * sysctl.
1058 	 */
1059 	switch (m->sctp_ep.sctp_default_cc_module) {
1060 		/* JRS - Standard TCP congestion control */
1061 	case SCTP_CC_RFC2581:
1062 		{
1063 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1064 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1065 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1066 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1067 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1068 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1069 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1070 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1071 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1072 			break;
1073 		}
1074 		/* JRS - High Speed TCP congestion control (Floyd) */
1075 	case SCTP_CC_HSTCP:
1076 		{
1077 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1078 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1079 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1080 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1081 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1082 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1083 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1084 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1085 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1086 			break;
1087 		}
1088 		/* JRS - HTCP congestion control */
1089 	case SCTP_CC_HTCP:
1090 		{
1091 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1092 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1093 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1094 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1095 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1096 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1097 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1098 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1099 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1100 			break;
1101 		}
1102 		/* JRS - By default, use RFC2581 */
1103 	default:
1104 		{
1105 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1106 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1107 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1108 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1109 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1110 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1111 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1112 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1113 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1114 			break;
1115 		}
1116 	}
1117 
1118 	/*
1119 	 * Now the stream parameters, here we allocate space for all streams
1120 	 * that we request by default.
1121 	 */
1122 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1123 	    m->sctp_ep.pre_open_stream_count;
1124 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1125 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1126 	    SCTP_M_STRMO);
1127 	if (asoc->strmout == NULL) {
1128 		/* big trouble no memory */
1129 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1130 		return (ENOMEM);
1131 	}
1132 	for (i = 0; i < asoc->streamoutcnt; i++) {
1133 		/*
1134 		 * inbound side must be set to 0xffff, also NOTE when we get
1135 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1136 		 * count (streamoutcnt) but first check if we sent to any of
1137 		 * the upper streams that were dropped (if some were). Those
1138 		 * that were dropped must be notified to the upper layer as
1139 		 * failed to send.
1140 		 */
1141 		asoc->strmout[i].next_sequence_sent = 0x0;
1142 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1143 		asoc->strmout[i].stream_no = i;
1144 		asoc->strmout[i].last_msg_incomplete = 0;
1145 		asoc->strmout[i].next_spoke.tqe_next = 0;
1146 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1147 	}
1148 	/* Now the mapping array */
1149 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1150 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1151 	    SCTP_M_MAP);
1152 	if (asoc->mapping_array == NULL) {
1153 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1154 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1155 		return (ENOMEM);
1156 	}
1157 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1158 	/* EY  - initialize the nr_mapping_array just like mapping array */
1159 	asoc->nr_mapping_array_size = SCTP_INITIAL_NR_MAPPING_ARRAY;
1160 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->nr_mapping_array_size,
1161 	    SCTP_M_MAP);
1162 	/*
1163 	 * if (asoc->nr_mapping_array == NULL) { SCTP_FREE(asoc->strmout,
1164 	 * SCTP_M_STRMO); SCTP_LTRACE_ERR_RET(NULL, stcb, NULL,
1165 	 * SCTP_FROM_SCTPUTIL, ENOMEM); return (ENOMEM); }
1166 	 */
1167 	memset(asoc->nr_mapping_array, 0, asoc->nr_mapping_array_size);
1168 
1169 	/* Now the init of the other outqueues */
1170 	TAILQ_INIT(&asoc->free_chunks);
1171 	TAILQ_INIT(&asoc->out_wheel);
1172 	TAILQ_INIT(&asoc->control_send_queue);
1173 	TAILQ_INIT(&asoc->asconf_send_queue);
1174 	TAILQ_INIT(&asoc->send_queue);
1175 	TAILQ_INIT(&asoc->sent_queue);
1176 	TAILQ_INIT(&asoc->reasmqueue);
1177 	TAILQ_INIT(&asoc->resetHead);
1178 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1179 	TAILQ_INIT(&asoc->asconf_queue);
1180 	/* authentication fields */
1181 	asoc->authinfo.random = NULL;
1182 	asoc->authinfo.active_keyid = 0;
1183 	asoc->authinfo.assoc_key = NULL;
1184 	asoc->authinfo.assoc_keyid = 0;
1185 	asoc->authinfo.recv_key = NULL;
1186 	asoc->authinfo.recv_keyid = 0;
1187 	LIST_INIT(&asoc->shared_keys);
1188 	asoc->marked_retrans = 0;
1189 	asoc->timoinit = 0;
1190 	asoc->timodata = 0;
1191 	asoc->timosack = 0;
1192 	asoc->timoshutdown = 0;
1193 	asoc->timoheartbeat = 0;
1194 	asoc->timocookie = 0;
1195 	asoc->timoshutdownack = 0;
1196 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1197 	asoc->discontinuity_time = asoc->start_time;
1198 	/*
1199 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1200 	 * freed later whe the association is freed.
1201 	 */
1202 	return (0);
1203 }
1204 
1205 int
1206 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1207 {
1208 	/* mapping array needs to grow */
1209 	uint8_t *new_array;
1210 	uint32_t new_size;
1211 
1212 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1213 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1214 	if (new_array == NULL) {
1215 		/* can't get more, forget it */
1216 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1217 		    new_size);
1218 		return (-1);
1219 	}
1220 	memset(new_array, 0, new_size);
1221 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1222 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1223 	asoc->mapping_array = new_array;
1224 	asoc->mapping_array_size = new_size;
1225 	return (0);
1226 }
1227 
1228 /* EY - nr_sack version of the above method */
1229 int
1230 sctp_expand_nr_mapping_array(struct sctp_association *asoc, uint32_t needed)
1231 {
1232 	/* nr mapping array needs to grow */
1233 	uint8_t *new_array;
1234 	uint32_t new_size;
1235 
1236 	new_size = asoc->nr_mapping_array_size + ((needed + 7) / 8 + SCTP_NR_MAPPING_ARRAY_INCR);
1237 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1238 	if (new_array == NULL) {
1239 		/* can't get more, forget it */
1240 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1241 		    new_size);
1242 		return (-1);
1243 	}
1244 	memset(new_array, 0, new_size);
1245 	memcpy(new_array, asoc->nr_mapping_array, asoc->nr_mapping_array_size);
1246 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1247 	asoc->nr_mapping_array = new_array;
1248 	asoc->nr_mapping_array_size = new_size;
1249 	return (0);
1250 }
1251 
1252 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Run one queued iterator to completion: walk the global endpoint list
 * (or a single endpoint when SCTP_ITERATOR_DO_SINGLE_INP is set),
 * filtering endpoints by pcb_flags/pcb_features and associations by
 * asoc_state, and invoke the iterator's callbacks along the way.
 * When the walk ends, function_atend is called and the iterator struct
 * is freed here — the caller must not touch "it" after this returns.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	/* Drop the reference taken when the iterator was queued. */
	if (it->inp) {
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* The iterator owns itself; free it on completion. */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/* Skip endpoints whose flags/features don't match the filter. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			/* single-endpoint mode: nothing else to try */
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/* Downgrade: only read access is needed to walk the assoc list. */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		/* First visit to this endpoint: run the per-inp callback. */
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint skipped or has no assocs: finish it and move on. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/*
			 * Pause to let others grab the lock.  Hold a
			 * refcnt on the tcb and the inp so neither can
			 * be freed while the locks are dropped.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	/*
	 * NOTE(review): this write lock/unlock pair appears to act only
	 * as a barrier for concurrent users of the inp — confirm before
	 * removing it.
	 */
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1379 
1380 void
1381 sctp_iterator_worker(void)
1382 {
1383 	struct sctp_iterator *it = NULL;
1384 
1385 	/* This function is called with the WQ lock in place */
1386 
1387 	SCTP_BASE_INFO(iterator_running) = 1;
1388 again:
1389 	it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1390 	while (it) {
1391 		/* now lets work on this one */
1392 		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
1393 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1394 		sctp_iterator_work(it);
1395 		SCTP_IPI_ITERATOR_WQ_LOCK();
1396 		/* sa_ignore FREED_MEMORY */
1397 		it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1398 	}
1399 	if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
1400 		goto again;
1401 	}
1402 	SCTP_BASE_INFO(iterator_running) = 0;
1403 	return;
1404 }
1405 
1406 #endif
1407 
1408 
1409 static void
1410 sctp_handle_addr_wq(void)
1411 {
1412 	/* deal with the ADDR wq from the rtsock calls */
1413 	struct sctp_laddr *wi;
1414 	struct sctp_asconf_iterator *asc;
1415 
1416 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1417 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1418 	if (asc == NULL) {
1419 		/* Try later, no memory */
1420 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1421 		    (struct sctp_inpcb *)NULL,
1422 		    (struct sctp_tcb *)NULL,
1423 		    (struct sctp_nets *)NULL);
1424 		return;
1425 	}
1426 	LIST_INIT(&asc->list_of_work);
1427 	asc->cnt = 0;
1428 	SCTP_IPI_ITERATOR_WQ_LOCK();
1429 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1430 	while (wi != NULL) {
1431 		LIST_REMOVE(wi, sctp_nxt_addr);
1432 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1433 		asc->cnt++;
1434 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1435 	}
1436 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1437 	if (asc->cnt == 0) {
1438 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1439 	} else {
1440 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1441 		    sctp_asconf_iterator_stcb,
1442 		    NULL,	/* No ep end for boundall */
1443 		    SCTP_PCB_FLAGS_BOUNDALL,
1444 		    SCTP_PCB_ANY_FEATURES,
1445 		    SCTP_ASOC_ANY_STATE,
1446 		    (void *)asc, 0,
1447 		    sctp_asconf_iterator_end, NULL, 0);
1448 	}
1449 }
1450 
/*
 * Scratch results for the SCTP_TIMER_TYPE_SEND case in
 * sctp_timeout_handler() below.
 * NOTE(review): these are file-scope, non-static, and shared by every
 * concurrently firing timer — presumably kept global for debugger
 * visibility; confirm before converting them to locals or statics.
 */
int retcode = 0;
int cur_oerr = 0;
1453 
/*
 * Common callout handler for all SCTP timers.  "t" points at the
 * struct sctp_timer embedded in the owning endpoint/assoc/net; its
 * ep/tcb/net fields identify what the timer belongs to.  The routine
 * validates the timer, takes the references and locks it needs,
 * dispatches on tmr->type to the per-timer handler, and drops
 * everything it took on the way out.  tmr->stopped_from is updated at
 * each bail-out point purely as a debugging breadcrumb.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;
	struct sctp_iterator *it = NULL;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Only the ADDR_WQ timer may legitimately fire without an inp. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
		/* For iterator timers, "ep" actually carries the iterator. */
		it = (struct sctp_iterator *)inp;
		inp = NULL;
	}
	type = tmr->type;
	if (inp) {
		/* Pin the endpoint for the duration of the handler. */
		SCTP_INP_INCR_REF(inp);
		/*
		 * A socket-less endpoint is being torn down; only the
		 * timer types below are still meaningful in that state.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the assoc; a zero state means it is already gone. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while we were getting here; bail. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/*
		 * Take the TCB lock, then re-check the assoc state now
		 * that we hold it (it may have changed while unlocked).
		 */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	/* record in stopped_from which timeout occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		SCTP_STAT_INCR(sctps_timoiterator);
		sctp_iterator_timer(it);
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timeout. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timeout. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timeout. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			int abort_flag;

			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
				sctp_sack_check(stcb, 0, 0, &abort_flag);

			/*
			 * EY if nr_sacks used then send an nr-sack , a sack
			 * otherwise
			 */
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
				sctp_send_nr_sack(stcb);
			else
				sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* Count reachable-but-unconfirmed destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			/*
			 * NOTE(review): after the FOREACH loop lnet is
			 * NULL, and it is passed to the calls below —
			 * presumably the callees treat a NULL net as
			 * "pick one"; confirm against their definitions.
			 */
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Shutdown took too long overall; abort the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Final teardown of the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_DIRECTLY_NOCMPSET);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
}
1930 
1931 void
1932 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1933     struct sctp_nets *net)
1934 {
1935 	int to_ticks;
1936 	struct sctp_timer *tmr;
1937 
1938 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1939 		return;
1940 
1941 	to_ticks = 0;
1942 
1943 	tmr = NULL;
1944 	if (stcb) {
1945 		SCTP_TCB_LOCK_ASSERT(stcb);
1946 	}
1947 	switch (t_type) {
1948 	case SCTP_TIMER_TYPE_ZERO_COPY:
1949 		tmr = &inp->sctp_ep.zero_copy_timer;
1950 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1951 		break;
1952 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1953 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1954 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1955 		break;
1956 	case SCTP_TIMER_TYPE_ADDR_WQ:
1957 		/* Only 1 tick away :-) */
1958 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1959 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1960 		break;
1961 	case SCTP_TIMER_TYPE_ITERATOR:
1962 		{
1963 			struct sctp_iterator *it;
1964 
1965 			it = (struct sctp_iterator *)inp;
1966 			tmr = &it->tmr;
1967 			to_ticks = SCTP_ITERATOR_TICKS;
1968 		}
1969 		break;
1970 	case SCTP_TIMER_TYPE_SEND:
1971 		/* Here we use the RTO timer */
1972 		{
1973 			int rto_val;
1974 
1975 			if ((stcb == NULL) || (net == NULL)) {
1976 				return;
1977 			}
1978 			tmr = &net->rxt_timer;
1979 			if (net->RTO == 0) {
1980 				rto_val = stcb->asoc.initial_rto;
1981 			} else {
1982 				rto_val = net->RTO;
1983 			}
1984 			to_ticks = MSEC_TO_TICKS(rto_val);
1985 		}
1986 		break;
1987 	case SCTP_TIMER_TYPE_INIT:
1988 		/*
1989 		 * Here we use the INIT timer default usually about 1
1990 		 * minute.
1991 		 */
1992 		if ((stcb == NULL) || (net == NULL)) {
1993 			return;
1994 		}
1995 		tmr = &net->rxt_timer;
1996 		if (net->RTO == 0) {
1997 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1998 		} else {
1999 			to_ticks = MSEC_TO_TICKS(net->RTO);
2000 		}
2001 		break;
2002 	case SCTP_TIMER_TYPE_RECV:
2003 		/*
2004 		 * Here we use the Delayed-Ack timer value from the inp
2005 		 * ususually about 200ms.
2006 		 */
2007 		if (stcb == NULL) {
2008 			return;
2009 		}
2010 		tmr = &stcb->asoc.dack_timer;
2011 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2012 		break;
2013 	case SCTP_TIMER_TYPE_SHUTDOWN:
2014 		/* Here we use the RTO of the destination. */
2015 		if ((stcb == NULL) || (net == NULL)) {
2016 			return;
2017 		}
2018 		if (net->RTO == 0) {
2019 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2020 		} else {
2021 			to_ticks = MSEC_TO_TICKS(net->RTO);
2022 		}
2023 		tmr = &net->rxt_timer;
2024 		break;
2025 	case SCTP_TIMER_TYPE_HEARTBEAT:
2026 		/*
2027 		 * the net is used here so that we can add in the RTO. Even
2028 		 * though we use a different timer. We also add the HB timer
2029 		 * PLUS a random jitter.
2030 		 */
2031 		if ((inp == NULL) || (stcb == NULL)) {
2032 			return;
2033 		} else {
2034 			uint32_t rndval;
2035 			uint8_t this_random;
2036 			int cnt_of_unconf = 0;
2037 			struct sctp_nets *lnet;
2038 
2039 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2040 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2041 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2042 					cnt_of_unconf++;
2043 				}
2044 			}
2045 			if (cnt_of_unconf) {
2046 				net = lnet = NULL;
2047 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
2048 			}
2049 			if (stcb->asoc.hb_random_idx > 3) {
2050 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2051 				memcpy(stcb->asoc.hb_random_values, &rndval,
2052 				    sizeof(stcb->asoc.hb_random_values));
2053 				stcb->asoc.hb_random_idx = 0;
2054 			}
2055 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
2056 			stcb->asoc.hb_random_idx++;
2057 			stcb->asoc.hb_ect_randombit = 0;
2058 			/*
2059 			 * this_random will be 0 - 256 ms RTO is in ms.
2060 			 */
2061 			if ((stcb->asoc.hb_is_disabled) &&
2062 			    (cnt_of_unconf == 0)) {
2063 				return;
2064 			}
2065 			if (net) {
2066 				int delay;
2067 
2068 				delay = stcb->asoc.heart_beat_delay;
2069 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2070 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2071 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2072 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2073 						delay = 0;
2074 					}
2075 				}
2076 				if (net->RTO == 0) {
2077 					/* Never been checked */
2078 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2079 				} else {
2080 					/* set rto_val to the ms */
2081 					to_ticks = delay + net->RTO + this_random;
2082 				}
2083 			} else {
2084 				if (cnt_of_unconf) {
2085 					to_ticks = this_random + stcb->asoc.initial_rto;
2086 				} else {
2087 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2088 				}
2089 			}
2090 			/*
2091 			 * Now we must convert the to_ticks that are now in
2092 			 * ms to ticks.
2093 			 */
2094 			to_ticks = MSEC_TO_TICKS(to_ticks);
2095 			tmr = &stcb->asoc.hb_timer;
2096 		}
2097 		break;
2098 	case SCTP_TIMER_TYPE_COOKIE:
2099 		/*
2100 		 * Here we can use the RTO timer from the network since one
2101 		 * RTT was compelete. If a retran happened then we will be
2102 		 * using the RTO initial value.
2103 		 */
2104 		if ((stcb == NULL) || (net == NULL)) {
2105 			return;
2106 		}
2107 		if (net->RTO == 0) {
2108 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2109 		} else {
2110 			to_ticks = MSEC_TO_TICKS(net->RTO);
2111 		}
2112 		tmr = &net->rxt_timer;
2113 		break;
2114 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2115 		/*
2116 		 * nothing needed but the endpoint here ususually about 60
2117 		 * minutes.
2118 		 */
2119 		if (inp == NULL) {
2120 			return;
2121 		}
2122 		tmr = &inp->sctp_ep.signature_change;
2123 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2124 		break;
2125 	case SCTP_TIMER_TYPE_ASOCKILL:
2126 		if (stcb == NULL) {
2127 			return;
2128 		}
2129 		tmr = &stcb->asoc.strreset_timer;
2130 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2131 		break;
2132 	case SCTP_TIMER_TYPE_INPKILL:
2133 		/*
2134 		 * The inp is setup to die. We re-use the signature_chage
2135 		 * timer since that has stopped and we are in the GONE
2136 		 * state.
2137 		 */
2138 		if (inp == NULL) {
2139 			return;
2140 		}
2141 		tmr = &inp->sctp_ep.signature_change;
2142 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2143 		break;
2144 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2145 		/*
2146 		 * Here we use the value found in the EP for PMTU ususually
2147 		 * about 10 minutes.
2148 		 */
2149 		if ((stcb == NULL) || (inp == NULL)) {
2150 			return;
2151 		}
2152 		if (net == NULL) {
2153 			return;
2154 		}
2155 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2156 		tmr = &net->pmtu_timer;
2157 		break;
2158 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2159 		/* Here we use the RTO of the destination */
2160 		if ((stcb == NULL) || (net == NULL)) {
2161 			return;
2162 		}
2163 		if (net->RTO == 0) {
2164 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2165 		} else {
2166 			to_ticks = MSEC_TO_TICKS(net->RTO);
2167 		}
2168 		tmr = &net->rxt_timer;
2169 		break;
2170 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2171 		/*
2172 		 * Here we use the endpoints shutdown guard timer usually
2173 		 * about 3 minutes.
2174 		 */
2175 		if ((inp == NULL) || (stcb == NULL)) {
2176 			return;
2177 		}
2178 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2179 		tmr = &stcb->asoc.shut_guard_timer;
2180 		break;
2181 	case SCTP_TIMER_TYPE_STRRESET:
2182 		/*
2183 		 * Here the timer comes from the stcb but its value is from
2184 		 * the net's RTO.
2185 		 */
2186 		if ((stcb == NULL) || (net == NULL)) {
2187 			return;
2188 		}
2189 		if (net->RTO == 0) {
2190 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2191 		} else {
2192 			to_ticks = MSEC_TO_TICKS(net->RTO);
2193 		}
2194 		tmr = &stcb->asoc.strreset_timer;
2195 		break;
2196 
2197 	case SCTP_TIMER_TYPE_EARLYFR:
2198 		{
2199 			unsigned int msec;
2200 
2201 			if ((stcb == NULL) || (net == NULL)) {
2202 				return;
2203 			}
2204 			if (net->flight_size > net->cwnd) {
2205 				/* no need to start */
2206 				return;
2207 			}
2208 			SCTP_STAT_INCR(sctps_earlyfrstart);
2209 			if (net->lastsa == 0) {
2210 				/* Hmm no rtt estimate yet? */
2211 				msec = stcb->asoc.initial_rto >> 2;
2212 			} else {
2213 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2214 			}
2215 			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
2216 				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
2217 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2218 					msec = SCTP_MINFR_MSEC_FLOOR;
2219 				}
2220 			}
2221 			to_ticks = MSEC_TO_TICKS(msec);
2222 			tmr = &net->fr_timer;
2223 		}
2224 		break;
2225 	case SCTP_TIMER_TYPE_ASCONF:
2226 		/*
2227 		 * Here the timer comes from the stcb but its value is from
2228 		 * the net's RTO.
2229 		 */
2230 		if ((stcb == NULL) || (net == NULL)) {
2231 			return;
2232 		}
2233 		if (net->RTO == 0) {
2234 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2235 		} else {
2236 			to_ticks = MSEC_TO_TICKS(net->RTO);
2237 		}
2238 		tmr = &stcb->asoc.asconf_timer;
2239 		break;
2240 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2241 		if ((stcb == NULL) || (net != NULL)) {
2242 			return;
2243 		}
2244 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2245 		tmr = &stcb->asoc.delete_prim_timer;
2246 		break;
2247 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2248 		if (stcb == NULL) {
2249 			return;
2250 		}
2251 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2252 			/*
2253 			 * Really an error since stcb is NOT set to
2254 			 * autoclose
2255 			 */
2256 			return;
2257 		}
2258 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2259 		tmr = &stcb->asoc.autoclose_timer;
2260 		break;
2261 	default:
2262 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2263 		    __FUNCTION__, t_type);
2264 		return;
2265 		break;
2266 	};
2267 	if ((to_ticks <= 0) || (tmr == NULL)) {
2268 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2269 		    __FUNCTION__, t_type, to_ticks, tmr);
2270 		return;
2271 	}
2272 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2273 		/*
2274 		 * we do NOT allow you to have it already running. if it is
2275 		 * we leave the current one up unchanged
2276 		 */
2277 		return;
2278 	}
2279 	/* At this point we can proceed */
2280 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2281 		stcb->asoc.num_send_timers_up++;
2282 	}
2283 	tmr->stopped_from = 0;
2284 	tmr->type = t_type;
2285 	tmr->ep = (void *)inp;
2286 	tmr->tcb = (void *)stcb;
2287 	tmr->net = (void *)net;
2288 	tmr->self = (void *)tmr;
2289 	tmr->ticks = sctp_get_tick_count();
2290 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2291 	return;
2292 }
2293 
2294 void
2295 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2296     struct sctp_nets *net, uint32_t from)
2297 {
2298 	struct sctp_timer *tmr;
2299 
2300 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2301 	    (inp == NULL))
2302 		return;
2303 
2304 	tmr = NULL;
2305 	if (stcb) {
2306 		SCTP_TCB_LOCK_ASSERT(stcb);
2307 	}
2308 	switch (t_type) {
2309 	case SCTP_TIMER_TYPE_ZERO_COPY:
2310 		tmr = &inp->sctp_ep.zero_copy_timer;
2311 		break;
2312 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2313 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2314 		break;
2315 	case SCTP_TIMER_TYPE_ADDR_WQ:
2316 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2317 		break;
2318 	case SCTP_TIMER_TYPE_EARLYFR:
2319 		if ((stcb == NULL) || (net == NULL)) {
2320 			return;
2321 		}
2322 		tmr = &net->fr_timer;
2323 		SCTP_STAT_INCR(sctps_earlyfrstop);
2324 		break;
2325 	case SCTP_TIMER_TYPE_ITERATOR:
2326 		{
2327 			struct sctp_iterator *it;
2328 
2329 			it = (struct sctp_iterator *)inp;
2330 			tmr = &it->tmr;
2331 		}
2332 		break;
2333 	case SCTP_TIMER_TYPE_SEND:
2334 		if ((stcb == NULL) || (net == NULL)) {
2335 			return;
2336 		}
2337 		tmr = &net->rxt_timer;
2338 		break;
2339 	case SCTP_TIMER_TYPE_INIT:
2340 		if ((stcb == NULL) || (net == NULL)) {
2341 			return;
2342 		}
2343 		tmr = &net->rxt_timer;
2344 		break;
2345 	case SCTP_TIMER_TYPE_RECV:
2346 		if (stcb == NULL) {
2347 			return;
2348 		}
2349 		tmr = &stcb->asoc.dack_timer;
2350 		break;
2351 	case SCTP_TIMER_TYPE_SHUTDOWN:
2352 		if ((stcb == NULL) || (net == NULL)) {
2353 			return;
2354 		}
2355 		tmr = &net->rxt_timer;
2356 		break;
2357 	case SCTP_TIMER_TYPE_HEARTBEAT:
2358 		if (stcb == NULL) {
2359 			return;
2360 		}
2361 		tmr = &stcb->asoc.hb_timer;
2362 		break;
2363 	case SCTP_TIMER_TYPE_COOKIE:
2364 		if ((stcb == NULL) || (net == NULL)) {
2365 			return;
2366 		}
2367 		tmr = &net->rxt_timer;
2368 		break;
2369 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2370 		/* nothing needed but the endpoint here */
2371 		tmr = &inp->sctp_ep.signature_change;
2372 		/*
2373 		 * We re-use the newcookie timer for the INP kill timer. We
2374 		 * must assure that we do not kill it by accident.
2375 		 */
2376 		break;
2377 	case SCTP_TIMER_TYPE_ASOCKILL:
2378 		/*
2379 		 * Stop the asoc kill timer.
2380 		 */
2381 		if (stcb == NULL) {
2382 			return;
2383 		}
2384 		tmr = &stcb->asoc.strreset_timer;
2385 		break;
2386 
2387 	case SCTP_TIMER_TYPE_INPKILL:
2388 		/*
2389 		 * The inp is setup to die. We re-use the signature_chage
2390 		 * timer since that has stopped and we are in the GONE
2391 		 * state.
2392 		 */
2393 		tmr = &inp->sctp_ep.signature_change;
2394 		break;
2395 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2396 		if ((stcb == NULL) || (net == NULL)) {
2397 			return;
2398 		}
2399 		tmr = &net->pmtu_timer;
2400 		break;
2401 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2402 		if ((stcb == NULL) || (net == NULL)) {
2403 			return;
2404 		}
2405 		tmr = &net->rxt_timer;
2406 		break;
2407 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2408 		if (stcb == NULL) {
2409 			return;
2410 		}
2411 		tmr = &stcb->asoc.shut_guard_timer;
2412 		break;
2413 	case SCTP_TIMER_TYPE_STRRESET:
2414 		if (stcb == NULL) {
2415 			return;
2416 		}
2417 		tmr = &stcb->asoc.strreset_timer;
2418 		break;
2419 	case SCTP_TIMER_TYPE_ASCONF:
2420 		if (stcb == NULL) {
2421 			return;
2422 		}
2423 		tmr = &stcb->asoc.asconf_timer;
2424 		break;
2425 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2426 		if (stcb == NULL) {
2427 			return;
2428 		}
2429 		tmr = &stcb->asoc.delete_prim_timer;
2430 		break;
2431 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2432 		if (stcb == NULL) {
2433 			return;
2434 		}
2435 		tmr = &stcb->asoc.autoclose_timer;
2436 		break;
2437 	default:
2438 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2439 		    __FUNCTION__, t_type);
2440 		break;
2441 	};
2442 	if (tmr == NULL) {
2443 		return;
2444 	}
2445 	if ((tmr->type != t_type) && tmr->type) {
2446 		/*
2447 		 * Ok we have a timer that is under joint use. Cookie timer
2448 		 * per chance with the SEND timer. We therefore are NOT
2449 		 * running the timer that the caller wants stopped.  So just
2450 		 * return.
2451 		 */
2452 		return;
2453 	}
2454 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2455 		stcb->asoc.num_send_timers_up--;
2456 		if (stcb->asoc.num_send_timers_up < 0) {
2457 			stcb->asoc.num_send_timers_up = 0;
2458 		}
2459 	}
2460 	tmr->self = NULL;
2461 	tmr->stopped_from = from;
2462 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2463 	return;
2464 }
2465 
2466 uint32_t
2467 sctp_calculate_len(struct mbuf *m)
2468 {
2469 	uint32_t tlen = 0;
2470 	struct mbuf *at;
2471 
2472 	at = m;
2473 	while (at) {
2474 		tlen += SCTP_BUF_LEN(at);
2475 		at = SCTP_BUF_NEXT(at);
2476 	}
2477 	return (tlen);
2478 }
2479 
2480 void
2481 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2482     struct sctp_association *asoc, uint32_t mtu)
2483 {
2484 	/*
2485 	 * Reset the P-MTU size on this association, this involves changing
2486 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2487 	 * allow the DF flag to be cleared.
2488 	 */
2489 	struct sctp_tmit_chunk *chk;
2490 	unsigned int eff_mtu, ovh;
2491 
2492 #ifdef SCTP_PRINT_FOR_B_AND_M
2493 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2494 	    inp, asoc, mtu);
2495 #endif
2496 	asoc->smallest_mtu = mtu;
2497 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2498 		ovh = SCTP_MIN_OVERHEAD;
2499 	} else {
2500 		ovh = SCTP_MIN_V4_OVERHEAD;
2501 	}
2502 	eff_mtu = mtu - ovh;
2503 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2504 
2505 		if (chk->send_size > eff_mtu) {
2506 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2507 		}
2508 	}
2509 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2510 		if (chk->send_size > eff_mtu) {
2511 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2512 		}
2513 	}
2514 }
2515 
2516 
2517 /*
2518  * given an association and starting time of the current RTT period return
2519  * RTO in number of msecs net should point to the current network
2520  */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 * Note: 'asoc' is not referenced in the body; kept for interface
	 * compatibility with callers.
	 */
	int calc_time = 0;	/* measured RTT, in ms */
	int o_calctime;		/* unsmoothed copy of the measurement */
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		/* 'told' may be misaligned: work from an aligned local copy */
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		/* elapsed whole seconds converted to ms */
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		/* update existing smoothed estimates (lastsa/lastsv) */
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			/* absolute deviation for the variance update */
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			/* variance must never collapse to zero */
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurement: seed the estimators directly */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR (integer-scaled) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		/* large RTO: treat this as a satellite-like network */
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2646 
2647 /*
2648  * return a pointer to a contiguous piece of data from the given mbuf chain
2649  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2650  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2651  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2652  */
2653 caddr_t
2654 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2655 {
2656 	uint32_t count;
2657 	uint8_t *ptr;
2658 
2659 	ptr = in_ptr;
2660 	if ((off < 0) || (len <= 0))
2661 		return (NULL);
2662 
2663 	/* find the desired start location */
2664 	while ((m != NULL) && (off > 0)) {
2665 		if (off < SCTP_BUF_LEN(m))
2666 			break;
2667 		off -= SCTP_BUF_LEN(m);
2668 		m = SCTP_BUF_NEXT(m);
2669 	}
2670 	if (m == NULL)
2671 		return (NULL);
2672 
2673 	/* is the current mbuf large enough (eg. contiguous)? */
2674 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2675 		return (mtod(m, caddr_t)+off);
2676 	} else {
2677 		/* else, it spans more than one mbuf, so save a temp copy... */
2678 		while ((m != NULL) && (len > 0)) {
2679 			count = min(SCTP_BUF_LEN(m) - off, len);
2680 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2681 			len -= count;
2682 			ptr += count;
2683 			off = 0;
2684 			m = SCTP_BUF_NEXT(m);
2685 		}
2686 		if ((m == NULL) && (len > 0))
2687 			return (NULL);
2688 		else
2689 			return ((caddr_t)in_ptr);
2690 	}
2691 }
2692 
2693 
2694 
2695 struct sctp_paramhdr *
2696 sctp_get_next_param(struct mbuf *m,
2697     int offset,
2698     struct sctp_paramhdr *pull,
2699     int pull_limit)
2700 {
2701 	/* This just provides a typed signature to Peter's Pull routine */
2702 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2703 	    (uint8_t *) pull));
2704 }
2705 
2706 
2707 int
2708 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2709 {
2710 	/*
2711 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2712 	 * padlen is > 3 this routine will fail.
2713 	 */
2714 	uint8_t *dp;
2715 	int i;
2716 
2717 	if (padlen > 3) {
2718 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2719 		return (ENOBUFS);
2720 	}
2721 	if (padlen <= M_TRAILINGSPACE(m)) {
2722 		/*
2723 		 * The easy way. We hope the majority of the time we hit
2724 		 * here :)
2725 		 */
2726 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2727 		SCTP_BUF_LEN(m) += padlen;
2728 	} else {
2729 		/* Hard way we must grow the mbuf */
2730 		struct mbuf *tmp;
2731 
2732 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2733 		if (tmp == NULL) {
2734 			/* Out of space GAK! we are in big trouble. */
2735 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2736 			return (ENOSPC);
2737 		}
2738 		/* setup and insert in middle */
2739 		SCTP_BUF_LEN(tmp) = padlen;
2740 		SCTP_BUF_NEXT(tmp) = NULL;
2741 		SCTP_BUF_NEXT(m) = tmp;
2742 		dp = mtod(tmp, uint8_t *);
2743 	}
2744 	/* zero out the pad */
2745 	for (i = 0; i < padlen; i++) {
2746 		*dp = 0;
2747 		dp++;
2748 	}
2749 	return (0);
2750 }
2751 
2752 int
2753 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2754 {
2755 	/* find the last mbuf in chain and pad it */
2756 	struct mbuf *m_at;
2757 
2758 	m_at = m;
2759 	if (last_mbuf) {
2760 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2761 	} else {
2762 		while (m_at) {
2763 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2764 				return (sctp_add_pad_tombuf(m_at, padval));
2765 			}
2766 			m_at = SCTP_BUF_NEXT(m_at);
2767 		}
2768 	}
2769 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2770 	return (EFAULT);
2771 }
2772 
/* Count of times sctp_notify_assoc_change() woke socket sleepers. */
int sctp_asoc_change_wake = 0;
2774 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification to the socket.  For TCP-model
 * (and UDP-connected) sockets a COMM_LOST/CANT_STR_ASSOC also sets
 * so_error and wakes any sleepers.  Caller holds the TCB lock; so_locked
 * says whether the socket lock is already held (Apple/lock-testing builds
 * drop and reacquire locks around socket operations).
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			/* never got past the handshake: connection refused */
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock-order dance: hold a refcount on the stcb,
			 * drop the TCB lock, take the socket lock, then
			 * retake the TCB lock.  The assoc may have been
			 * closed while unlocked - re-check before use.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Build the sctp_assoc_change notification in the fresh mbuf. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* Queue the notification on the socket's read queue. */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* same lock-order dance as above */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2891 
2892 static void
2893 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2894     struct sockaddr *sa, uint32_t error)
2895 {
2896 	struct mbuf *m_notify;
2897 	struct sctp_paddr_change *spc;
2898 	struct sctp_queued_to_read *control;
2899 
2900 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2901 		/* event not enabled */
2902 		return;
2903 	}
2904 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2905 	if (m_notify == NULL)
2906 		return;
2907 	SCTP_BUF_LEN(m_notify) = 0;
2908 	spc = mtod(m_notify, struct sctp_paddr_change *);
2909 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2910 	spc->spc_flags = 0;
2911 	spc->spc_length = sizeof(struct sctp_paddr_change);
2912 	switch (sa->sa_family) {
2913 	case AF_INET:
2914 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2915 		break;
2916 #ifdef INET6
2917 	case AF_INET6:
2918 		{
2919 			struct sockaddr_in6 *sin6;
2920 
2921 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2922 
2923 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2924 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2925 				if (sin6->sin6_scope_id == 0) {
2926 					/* recover scope_id for user */
2927 					(void)sa6_recoverscope(sin6);
2928 				} else {
2929 					/* clear embedded scope_id for user */
2930 					in6_clearscope(&sin6->sin6_addr);
2931 				}
2932 			}
2933 			break;
2934 		}
2935 #endif
2936 	default:
2937 		/* TSNH */
2938 		break;
2939 	}
2940 	spc->spc_state = state;
2941 	spc->spc_error = error;
2942 	spc->spc_assoc_id = sctp_get_associd(stcb);
2943 
2944 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2945 	SCTP_BUF_NEXT(m_notify) = NULL;
2946 
2947 	/* append to socket */
2948 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2949 	    0, 0, 0, 0, 0, 0,
2950 	    m_notify);
2951 	if (control == NULL) {
2952 		/* no memory */
2953 		sctp_m_freem(m_notify);
2954 		return;
2955 	}
2956 	control->length = SCTP_BUF_LEN(m_notify);
2957 	control->spec_flags = M_NOTIFICATION;
2958 	/* not that we need this */
2959 	control->tail_mbuf = m_notify;
2960 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2961 	    control,
2962 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
2963 }
2964 
2965 
/*
 * Queue an SCTP_SEND_FAILED notification for a chunk from the
 * send/sent queue.  'error' selects SCTP_DATA_UNSENT vs SCTP_DATA_SENT.
 * Ownership of chk->data is transferred to the notification (chk->data
 * is set to NULL below), with the SCTP data-chunk header trimmed off
 * first so the user sees approximately what was originally sent.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * User-visible length: notification header plus payload, minus the
	 * data-chunk header that gets trimmed below.  NOTE(review): if
	 * chk->send_size < sizeof(struct sctp_data_chunk) the trim is
	 * skipped but the subtraction still happened — ssf_length would
	 * understate the data; presumably send_size always covers the
	 * header here — confirm against callers.
	 */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	/* chain the original data mbufs behind the notification header */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* freeing m_notify also frees the chained (stolen) data */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3045 
3046 
3047 static void
3048 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3049     struct sctp_stream_queue_pending *sp, int so_locked
3050 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3051     SCTP_UNUSED
3052 #endif
3053 )
3054 {
3055 	struct mbuf *m_notify;
3056 	struct sctp_send_failed *ssf;
3057 	struct sctp_queued_to_read *control;
3058 	int length;
3059 
3060 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
3061 		/* event not enabled */
3062 		return;
3063 	}
3064 	length = sizeof(struct sctp_send_failed) + sp->length;
3065 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
3066 	if (m_notify == NULL)
3067 		/* no space left */
3068 		return;
3069 	SCTP_BUF_LEN(m_notify) = 0;
3070 	ssf = mtod(m_notify, struct sctp_send_failed *);
3071 	ssf->ssf_type = SCTP_SEND_FAILED;
3072 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
3073 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3074 	else
3075 		ssf->ssf_flags = SCTP_DATA_SENT;
3076 	ssf->ssf_length = length;
3077 	ssf->ssf_error = error;
3078 	/* not exactly what the user sent in, but should be close :) */
3079 	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3080 	ssf->ssf_info.sinfo_stream = sp->stream;
3081 	ssf->ssf_info.sinfo_ssn = sp->strseq;
3082 	if (sp->some_taken) {
3083 		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3084 	} else {
3085 		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3086 	}
3087 	ssf->ssf_info.sinfo_ppid = sp->ppid;
3088 	ssf->ssf_info.sinfo_context = sp->context;
3089 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3090 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
3091 	SCTP_BUF_NEXT(m_notify) = sp->data;
3092 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3093 
3094 	/* Steal off the mbuf */
3095 	sp->data = NULL;
3096 	/*
3097 	 * For this case, we check the actual socket buffer, since the assoc
3098 	 * is going away we don't want to overfill the socket buffer for a
3099 	 * non-reader
3100 	 */
3101 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3102 		sctp_m_freem(m_notify);
3103 		return;
3104 	}
3105 	/* append to socket */
3106 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3107 	    0, 0, 0, 0, 0, 0,
3108 	    m_notify);
3109 	if (control == NULL) {
3110 		/* no memory */
3111 		sctp_m_freem(m_notify);
3112 		return;
3113 	}
3114 	control->spec_flags = M_NOTIFICATION;
3115 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3116 	    control,
3117 	    &stcb->sctp_socket->so_rcv, 1, so_locked);
3118 }
3119 
3120 
3121 
3122 static void
3123 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3124     uint32_t error)
3125 {
3126 	struct mbuf *m_notify;
3127 	struct sctp_adaptation_event *sai;
3128 	struct sctp_queued_to_read *control;
3129 
3130 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3131 		/* event not enabled */
3132 		return;
3133 	}
3134 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3135 	if (m_notify == NULL)
3136 		/* no space left */
3137 		return;
3138 	SCTP_BUF_LEN(m_notify) = 0;
3139 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3140 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3141 	sai->sai_flags = 0;
3142 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3143 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3144 	sai->sai_assoc_id = sctp_get_associd(stcb);
3145 
3146 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3147 	SCTP_BUF_NEXT(m_notify) = NULL;
3148 
3149 	/* append to socket */
3150 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3151 	    0, 0, 0, 0, 0, 0,
3152 	    m_notify);
3153 	if (control == NULL) {
3154 		/* no memory */
3155 		sctp_m_freem(m_notify);
3156 		return;
3157 	}
3158 	control->length = SCTP_BUF_LEN(m_notify);
3159 	control->spec_flags = M_NOTIFICATION;
3160 	/* not that we need this */
3161 	control->tail_mbuf = m_notify;
3162 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3163 	    control,
3164 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3165 }
3166 
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT on the endpoint's read queue.
 * 'error' is the pdapi indication code; 'val' packs the stream number
 * in the upper 16 bits and the stream sequence in the lower 16 bits.
 * If 'nolock' is non-zero the caller must already hold the INP
 * read-queue lock; otherwise it is taken (and released) here.
 */
void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    int nolock, uint32_t val)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* unpack the stream/seq pair encoded in 'val' */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	/*
	 * length is deliberately reset to 0 here; it is re-accumulated by
	 * the atomic_add_int() after the sballoc accounting below.
	 */
	control->length = 0;
	if (nolock == 0) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* charge the notification against the socket buffer */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	/*
	 * Insert directly after the partially-delivered message this event
	 * refers to, so the reader sees the notification in order.
	 */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (nolock == 0) {
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
	}
}
3238 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification.  For one-to-one style
 * (TCP model) sockets and connected one-to-many sockets the socket is
 * additionally marked unable to send, waking up blocked writers.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Lock order requires the socket lock before the TCB lock:
		 * bump the refcount so the TCB survives while we drop its
		 * lock, take the socket lock, then re-take the TCB lock.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket was closed while the TCB lock was dropped */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3306 
3307 static void
3308 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3309     int so_locked
3310 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3311     SCTP_UNUSED
3312 #endif
3313 )
3314 {
3315 	struct mbuf *m_notify;
3316 	struct sctp_sender_dry_event *event;
3317 	struct sctp_queued_to_read *control;
3318 
3319 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3320 		/* event not enabled */
3321 		return;
3322 	}
3323 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3324 	if (m_notify == NULL) {
3325 		/* no space left */
3326 		return;
3327 	}
3328 	SCTP_BUF_LEN(m_notify) = 0;
3329 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3330 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3331 	event->sender_dry_flags = 0;
3332 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3333 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3334 
3335 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3336 	SCTP_BUF_NEXT(m_notify) = NULL;
3337 
3338 	/* append to socket */
3339 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3340 	    0, 0, 0, 0, 0, 0, m_notify);
3341 	if (control == NULL) {
3342 		/* no memory */
3343 		sctp_m_freem(m_notify);
3344 		return;
3345 	}
3346 	control->length = SCTP_BUF_LEN(m_notify);
3347 	control->spec_flags = M_NOTIFICATION;
3348 	/* not that we need this */
3349 	control->tail_mbuf = m_notify;
3350 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3351 	    &stcb->sctp_socket->so_rcv, 1, so_locked);
3352 }
3353 
3354 
3355 static void
3356 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3357 {
3358 	struct mbuf *m_notify;
3359 	struct sctp_queued_to_read *control;
3360 	struct sctp_stream_reset_event *strreset;
3361 	int len;
3362 
3363 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3364 		/* event not enabled */
3365 		return;
3366 	}
3367 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3368 	if (m_notify == NULL)
3369 		/* no space left */
3370 		return;
3371 	SCTP_BUF_LEN(m_notify) = 0;
3372 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3373 	if (len > M_TRAILINGSPACE(m_notify)) {
3374 		/* never enough room */
3375 		sctp_m_freem(m_notify);
3376 		return;
3377 	}
3378 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3379 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3380 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3381 	strreset->strreset_length = len;
3382 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3383 	strreset->strreset_list[0] = number_entries;
3384 
3385 	SCTP_BUF_LEN(m_notify) = len;
3386 	SCTP_BUF_NEXT(m_notify) = NULL;
3387 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3388 		/* no space */
3389 		sctp_m_freem(m_notify);
3390 		return;
3391 	}
3392 	/* append to socket */
3393 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3394 	    0, 0, 0, 0, 0, 0,
3395 	    m_notify);
3396 	if (control == NULL) {
3397 		/* no memory */
3398 		sctp_m_freem(m_notify);
3399 		return;
3400 	}
3401 	control->spec_flags = M_NOTIFICATION;
3402 	control->length = SCTP_BUF_LEN(m_notify);
3403 	/* not that we need this */
3404 	control->tail_mbuf = m_notify;
3405 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3406 	    control,
3407 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3408 }
3409 
3410 
3411 static void
3412 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3413     int number_entries, uint16_t * list, int flag)
3414 {
3415 	struct mbuf *m_notify;
3416 	struct sctp_queued_to_read *control;
3417 	struct sctp_stream_reset_event *strreset;
3418 	int len;
3419 
3420 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3421 		/* event not enabled */
3422 		return;
3423 	}
3424 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3425 	if (m_notify == NULL)
3426 		/* no space left */
3427 		return;
3428 	SCTP_BUF_LEN(m_notify) = 0;
3429 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3430 	if (len > M_TRAILINGSPACE(m_notify)) {
3431 		/* never enough room */
3432 		sctp_m_freem(m_notify);
3433 		return;
3434 	}
3435 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3436 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3437 	if (number_entries == 0) {
3438 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3439 	} else {
3440 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3441 	}
3442 	strreset->strreset_length = len;
3443 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3444 	if (number_entries) {
3445 		int i;
3446 
3447 		for (i = 0; i < number_entries; i++) {
3448 			strreset->strreset_list[i] = ntohs(list[i]);
3449 		}
3450 	}
3451 	SCTP_BUF_LEN(m_notify) = len;
3452 	SCTP_BUF_NEXT(m_notify) = NULL;
3453 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3454 		/* no space */
3455 		sctp_m_freem(m_notify);
3456 		return;
3457 	}
3458 	/* append to socket */
3459 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3460 	    0, 0, 0, 0, 0, 0,
3461 	    m_notify);
3462 	if (control == NULL) {
3463 		/* no memory */
3464 		sctp_m_freem(m_notify);
3465 		return;
3466 	}
3467 	control->spec_flags = M_NOTIFICATION;
3468 	control->length = SCTP_BUF_LEN(m_notify);
3469 	/* not that we need this */
3470 	control->tail_mbuf = m_notify;
3471 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3472 	    control,
3473 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3474 }
3475 
3476 
3477 void
3478 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3479     uint32_t error, void *data, int so_locked
3480 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3481     SCTP_UNUSED
3482 #endif
3483 )
3484 {
3485 	if ((stcb == NULL) ||
3486 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3487 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3488 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3489 		/* If the socket is gone we are out of here */
3490 		return;
3491 	}
3492 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3493 		return;
3494 	}
3495 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3496 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3497 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3498 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3499 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3500 			/* Don't report these in front states */
3501 			return;
3502 		}
3503 	}
3504 	switch (notification) {
3505 	case SCTP_NOTIFY_ASSOC_UP:
3506 		if (stcb->asoc.assoc_up_sent == 0) {
3507 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3508 			stcb->asoc.assoc_up_sent = 1;
3509 		}
3510 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3511 			sctp_notify_adaptation_layer(stcb, error);
3512 		}
3513 		if (stcb->asoc.peer_supports_auth == 0) {
3514 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3515 			    NULL, so_locked);
3516 		}
3517 		break;
3518 	case SCTP_NOTIFY_ASSOC_DOWN:
3519 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3520 		break;
3521 	case SCTP_NOTIFY_INTERFACE_DOWN:
3522 		{
3523 			struct sctp_nets *net;
3524 
3525 			net = (struct sctp_nets *)data;
3526 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3527 			    (struct sockaddr *)&net->ro._l_addr, error);
3528 			break;
3529 		}
3530 	case SCTP_NOTIFY_INTERFACE_UP:
3531 		{
3532 			struct sctp_nets *net;
3533 
3534 			net = (struct sctp_nets *)data;
3535 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3536 			    (struct sockaddr *)&net->ro._l_addr, error);
3537 			break;
3538 		}
3539 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3540 		{
3541 			struct sctp_nets *net;
3542 
3543 			net = (struct sctp_nets *)data;
3544 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3545 			    (struct sockaddr *)&net->ro._l_addr, error);
3546 			break;
3547 		}
3548 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3549 		sctp_notify_send_failed2(stcb, error,
3550 		    (struct sctp_stream_queue_pending *)data, so_locked);
3551 		break;
3552 	case SCTP_NOTIFY_DG_FAIL:
3553 		sctp_notify_send_failed(stcb, error,
3554 		    (struct sctp_tmit_chunk *)data, so_locked);
3555 		break;
3556 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3557 		{
3558 			uint32_t val;
3559 
3560 			val = *((uint32_t *) data);
3561 
3562 			sctp_notify_partial_delivery_indication(stcb, error, 0, val);
3563 		}
3564 		break;
3565 	case SCTP_NOTIFY_STRDATA_ERR:
3566 		break;
3567 	case SCTP_NOTIFY_ASSOC_ABORTED:
3568 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3569 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3570 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3571 		} else {
3572 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3573 		}
3574 		break;
3575 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3576 		break;
3577 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3578 		break;
3579 	case SCTP_NOTIFY_ASSOC_RESTART:
3580 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3581 		if (stcb->asoc.peer_supports_auth == 0) {
3582 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3583 			    NULL, so_locked);
3584 		}
3585 		break;
3586 	case SCTP_NOTIFY_HB_RESP:
3587 		break;
3588 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3589 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3590 		break;
3591 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3592 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3593 		break;
3594 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3595 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3596 		break;
3597 
3598 	case SCTP_NOTIFY_STR_RESET_SEND:
3599 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3600 		break;
3601 	case SCTP_NOTIFY_STR_RESET_RECV:
3602 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3603 		break;
3604 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3605 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3606 		break;
3607 
3608 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3609 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3610 		break;
3611 
3612 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3613 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3614 		    error);
3615 		break;
3616 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3617 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3618 		    error);
3619 		break;
3620 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3621 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3622 		    error);
3623 		break;
3624 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3625 		break;
3626 	case SCTP_NOTIFY_ASCONF_FAILED:
3627 		break;
3628 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3629 		sctp_notify_shutdown_event(stcb);
3630 		break;
3631 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3632 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3633 		    (uint16_t) (uintptr_t) data,
3634 		    so_locked);
3635 		break;
3636 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3637 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3638 		    (uint16_t) (uintptr_t) data,
3639 		    so_locked);
3640 		break;
3641 	case SCTP_NOTIFY_NO_PEER_AUTH:
3642 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3643 		    (uint16_t) (uintptr_t) data,
3644 		    so_locked);
3645 		break;
3646 	case SCTP_NOTIFY_SENDER_DRY:
3647 		sctp_notify_sender_dry_event(stcb, so_locked);
3648 		break;
3649 	default:
3650 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3651 		    __FUNCTION__, notification, notification);
3652 		break;
3653 	}			/* end switch */
3654 }
3655 
3656 void
3657 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3658 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3659     SCTP_UNUSED
3660 #endif
3661 )
3662 {
3663 	struct sctp_association *asoc;
3664 	struct sctp_stream_out *outs;
3665 	struct sctp_tmit_chunk *chk;
3666 	struct sctp_stream_queue_pending *sp;
3667 	int i;
3668 
3669 	asoc = &stcb->asoc;
3670 
3671 	if (stcb == NULL) {
3672 		return;
3673 	}
3674 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3675 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3676 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3677 		return;
3678 	}
3679 	/* now through all the gunk freeing chunks */
3680 	if (holds_lock == 0) {
3681 		SCTP_TCB_SEND_LOCK(stcb);
3682 	}
3683 	/* sent queue SHOULD be empty */
3684 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3685 		chk = TAILQ_FIRST(&asoc->sent_queue);
3686 		while (chk) {
3687 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3688 			asoc->sent_queue_cnt--;
3689 			if (chk->data != NULL) {
3690 				sctp_free_bufspace(stcb, asoc, chk, 1);
3691 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3692 				    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3693 				sctp_m_freem(chk->data);
3694 				chk->data = NULL;
3695 			}
3696 			sctp_free_a_chunk(stcb, chk);
3697 			/* sa_ignore FREED_MEMORY */
3698 			chk = TAILQ_FIRST(&asoc->sent_queue);
3699 		}
3700 	}
3701 	/* pending send queue SHOULD be empty */
3702 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3703 		chk = TAILQ_FIRST(&asoc->send_queue);
3704 		while (chk) {
3705 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3706 			asoc->send_queue_cnt--;
3707 			if (chk->data != NULL) {
3708 				sctp_free_bufspace(stcb, asoc, chk, 1);
3709 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3710 				    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3711 				sctp_m_freem(chk->data);
3712 				chk->data = NULL;
3713 			}
3714 			sctp_free_a_chunk(stcb, chk);
3715 			/* sa_ignore FREED_MEMORY */
3716 			chk = TAILQ_FIRST(&asoc->send_queue);
3717 		}
3718 	}
3719 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3720 		/* For each stream */
3721 		outs = &stcb->asoc.strmout[i];
3722 		/* clean up any sends there */
3723 		stcb->asoc.locked_on_sending = NULL;
3724 		sp = TAILQ_FIRST(&outs->outqueue);
3725 		while (sp) {
3726 			stcb->asoc.stream_queue_cnt--;
3727 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3728 			sctp_free_spbufspace(stcb, asoc, sp);
3729 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3730 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3731 			if (sp->data) {
3732 				sctp_m_freem(sp->data);
3733 				sp->data = NULL;
3734 			}
3735 			if (sp->net)
3736 				sctp_free_remote_addr(sp->net);
3737 			sp->net = NULL;
3738 			/* Free the chunk */
3739 			sctp_free_a_strmoq(stcb, sp);
3740 			/* sa_ignore FREED_MEMORY */
3741 			sp = TAILQ_FIRST(&outs->outqueue);
3742 		}
3743 	}
3744 
3745 	if (holds_lock == 0) {
3746 		SCTP_TCB_SEND_UNLOCK(stcb);
3747 	}
3748 }
3749 
3750 void
3751 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3752 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3753     SCTP_UNUSED
3754 #endif
3755 )
3756 {
3757 
3758 	if (stcb == NULL) {
3759 		return;
3760 	}
3761 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3762 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3763 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3764 		return;
3765 	}
3766 	/* Tell them we lost the asoc */
3767 	sctp_report_all_outbound(stcb, 1, so_locked);
3768 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3769 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3770 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3771 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3772 	}
3773 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3774 }
3775 
/*
 * Abort an association in response to an incoming packet: send an
 * ABORT to the peer (using the peer's vtag when a TCB exists, else
 * reflecting from the packet header in 'sh'), then tear the TCB down.
 * With no TCB, the inp is freed if it was already marked gone and has
 * no remaining associations.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock order: the socket lock must be taken before the TCB
		 * lock; hold a refcount so the TCB survives the window in
		 * which its lock is dropped.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		/* no TCB: release the inp if it is already on its way out */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3821 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Dump the per-association inbound/outbound TSN tracking logs to the
 * console.  Each log is a circular buffer: when the 'wrapped' flag is
 * set the entries from the current index to the end are printed first,
 * then the entries from the start up to the current index.
 *
 * NOTE(review): the inner guard below is spelled "NOSIY_PRINTS" (sic);
 * unless that exact macro is defined the function body compiles to
 * nothing.  This looks like a typo for "NOISY_PRINTS", but since it is
 * a build-option name, confirm no build defines the misspelling before
 * renaming it.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* older half of the circular log: index..end */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* newer half of the circular log: start..index */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3885 
/*
 * Locally abort an established association: notify the ULP (unless the
 * socket is gone), send an ABORT chunk with 'op_err' to the peer, then
 * free the TCB.  With no TCB, only the inp is cleaned up if it was
 * already marked gone.  'so_locked' says whether the caller already
 * holds the socket lock (Apple / lock-testing builds only).
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last assoc is gone; release the inp */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		/* the assoc counted as established; drop the gauge */
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock order: acquire the socket lock before the TCB lock; the
	 * refcount keeps the TCB alive while its lock is dropped.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3951 
3952 void
3953 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3954     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3955 {
3956 	struct sctp_chunkhdr *ch, chunk_buf;
3957 	unsigned int chk_length;
3958 
3959 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3960 	/* Generate a TO address for future reference */
3961 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3962 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3963 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3964 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3965 		}
3966 	}
3967 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3968 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3969 	while (ch != NULL) {
3970 		chk_length = ntohs(ch->chunk_length);
3971 		if (chk_length < sizeof(*ch)) {
3972 			/* break to abort land */
3973 			break;
3974 		}
3975 		switch (ch->chunk_type) {
3976 		case SCTP_COOKIE_ECHO:
3977 			/* We hit here only if the assoc is being freed */
3978 			return;
3979 		case SCTP_PACKET_DROPPED:
3980 			/* we don't respond to pkt-dropped */
3981 			return;
3982 		case SCTP_ABORT_ASSOCIATION:
3983 			/* we don't respond with an ABORT to an ABORT */
3984 			return;
3985 		case SCTP_SHUTDOWN_COMPLETE:
3986 			/*
3987 			 * we ignore it since we are not waiting for it and
3988 			 * peer is gone
3989 			 */
3990 			return;
3991 		case SCTP_SHUTDOWN_ACK:
3992 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
3993 			return;
3994 		default:
3995 			break;
3996 		}
3997 		offset += SCTP_SIZE32(chk_length);
3998 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3999 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4000 	}
4001 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
4002 }
4003 
4004 /*
4005  * check the inbound datagram to make sure there is not an abort inside it,
4006  * if there is return 1, else return 0.
4007  */
4008 int
4009 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4010 {
4011 	struct sctp_chunkhdr *ch;
4012 	struct sctp_init_chunk *init_chk, chunk_buf;
4013 	int offset;
4014 	unsigned int chk_length;
4015 
4016 	offset = iphlen + sizeof(struct sctphdr);
4017 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4018 	    (uint8_t *) & chunk_buf);
4019 	while (ch != NULL) {
4020 		chk_length = ntohs(ch->chunk_length);
4021 		if (chk_length < sizeof(*ch)) {
4022 			/* packet is probably corrupt */
4023 			break;
4024 		}
4025 		/* we seem to be ok, is it an abort? */
4026 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4027 			/* yep, tell them */
4028 			return (1);
4029 		}
4030 		if (ch->chunk_type == SCTP_INITIATION) {
4031 			/* need to update the Vtag */
4032 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4033 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4034 			if (init_chk != NULL) {
4035 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4036 			}
4037 		}
4038 		/* Nope, move to the next chunk */
4039 		offset += SCTP_SIZE32(chk_length);
4040 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4041 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4042 	}
4043 	return (0);
4044 }
4045 
4046 /*
4047  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4048  * set (i.e. it's 0) so, create this function to compare link local scopes
4049  */
4050 #ifdef INET6
4051 uint32_t
4052 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4053 {
4054 	struct sockaddr_in6 a, b;
4055 
4056 	/* save copies */
4057 	a = *addr1;
4058 	b = *addr2;
4059 
4060 	if (a.sin6_scope_id == 0)
4061 		if (sa6_recoverscope(&a)) {
4062 			/* can't get scope, so can't match */
4063 			return (0);
4064 		}
4065 	if (b.sin6_scope_id == 0)
4066 		if (sa6_recoverscope(&b)) {
4067 			/* can't get scope, so can't match */
4068 			return (0);
4069 		}
4070 	if (a.sin6_scope_id != b.sin6_scope_id)
4071 		return (0);
4072 
4073 	return (1);
4074 }
4075 
4076 /*
4077  * returns a sockaddr_in6 with embedded scope recovered and removed
4078  */
4079 struct sockaddr_in6 *
4080 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4081 {
4082 	/* check and strip embedded scope junk */
4083 	if (addr->sin6_family == AF_INET6) {
4084 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4085 			if (addr->sin6_scope_id == 0) {
4086 				*store = *addr;
4087 				if (!sa6_recoverscope(store)) {
4088 					/* use the recovered scope */
4089 					addr = store;
4090 				}
4091 			} else {
4092 				/* else, return the original "to" addr */
4093 				in6_clearscope(&addr->sin6_addr);
4094 			}
4095 		}
4096 	}
4097 	return (addr);
4098 }
4099 
4100 #endif
4101 
4102 /*
4103  * are the two addresses the same?  currently a "scopeless" check returns: 1
4104  * if same, 0 if not
4105  */
4106 int
4107 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4108 {
4109 
4110 	/* must be valid */
4111 	if (sa1 == NULL || sa2 == NULL)
4112 		return (0);
4113 
4114 	/* must be the same family */
4115 	if (sa1->sa_family != sa2->sa_family)
4116 		return (0);
4117 
4118 	switch (sa1->sa_family) {
4119 #ifdef INET6
4120 	case AF_INET6:
4121 		{
4122 			/* IPv6 addresses */
4123 			struct sockaddr_in6 *sin6_1, *sin6_2;
4124 
4125 			sin6_1 = (struct sockaddr_in6 *)sa1;
4126 			sin6_2 = (struct sockaddr_in6 *)sa2;
4127 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4128 			    sin6_2));
4129 		}
4130 #endif
4131 	case AF_INET:
4132 		{
4133 			/* IPv4 addresses */
4134 			struct sockaddr_in *sin_1, *sin_2;
4135 
4136 			sin_1 = (struct sockaddr_in *)sa1;
4137 			sin_2 = (struct sockaddr_in *)sa2;
4138 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4139 		}
4140 	default:
4141 		/* we don't do these... */
4142 		return (0);
4143 	}
4144 }
4145 
4146 void
4147 sctp_print_address(struct sockaddr *sa)
4148 {
4149 #ifdef INET6
4150 	char ip6buf[INET6_ADDRSTRLEN];
4151 
4152 	ip6buf[0] = 0;
4153 #endif
4154 
4155 	switch (sa->sa_family) {
4156 #ifdef INET6
4157 	case AF_INET6:
4158 		{
4159 			struct sockaddr_in6 *sin6;
4160 
4161 			sin6 = (struct sockaddr_in6 *)sa;
4162 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4163 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4164 			    ntohs(sin6->sin6_port),
4165 			    sin6->sin6_scope_id);
4166 			break;
4167 		}
4168 #endif
4169 	case AF_INET:
4170 		{
4171 			struct sockaddr_in *sin;
4172 			unsigned char *p;
4173 
4174 			sin = (struct sockaddr_in *)sa;
4175 			p = (unsigned char *)&sin->sin_addr;
4176 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4177 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4178 			break;
4179 		}
4180 	default:
4181 		SCTP_PRINTF("?\n");
4182 		break;
4183 	}
4184 }
4185 
4186 void
4187 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4188 {
4189 	switch (iph->ip_v) {
4190 		case IPVERSION:
4191 		{
4192 			struct sockaddr_in lsa, fsa;
4193 
4194 			bzero(&lsa, sizeof(lsa));
4195 			lsa.sin_len = sizeof(lsa);
4196 			lsa.sin_family = AF_INET;
4197 			lsa.sin_addr = iph->ip_src;
4198 			lsa.sin_port = sh->src_port;
4199 			bzero(&fsa, sizeof(fsa));
4200 			fsa.sin_len = sizeof(fsa);
4201 			fsa.sin_family = AF_INET;
4202 			fsa.sin_addr = iph->ip_dst;
4203 			fsa.sin_port = sh->dest_port;
4204 			SCTP_PRINTF("src: ");
4205 			sctp_print_address((struct sockaddr *)&lsa);
4206 			SCTP_PRINTF("dest: ");
4207 			sctp_print_address((struct sockaddr *)&fsa);
4208 			break;
4209 		}
4210 #ifdef INET6
4211 	case IPV6_VERSION >> 4:
4212 		{
4213 			struct ip6_hdr *ip6;
4214 			struct sockaddr_in6 lsa6, fsa6;
4215 
4216 			ip6 = (struct ip6_hdr *)iph;
4217 			bzero(&lsa6, sizeof(lsa6));
4218 			lsa6.sin6_len = sizeof(lsa6);
4219 			lsa6.sin6_family = AF_INET6;
4220 			lsa6.sin6_addr = ip6->ip6_src;
4221 			lsa6.sin6_port = sh->src_port;
4222 			bzero(&fsa6, sizeof(fsa6));
4223 			fsa6.sin6_len = sizeof(fsa6);
4224 			fsa6.sin6_family = AF_INET6;
4225 			fsa6.sin6_addr = ip6->ip6_dst;
4226 			fsa6.sin6_port = sh->dest_port;
4227 			SCTP_PRINTF("src: ");
4228 			sctp_print_address((struct sockaddr *)&lsa6);
4229 			SCTP_PRINTF("dest: ");
4230 			sctp_print_address((struct sockaddr *)&fsa6);
4231 			break;
4232 		}
4233 #endif
4234 	default:
4235 		/* TSNH */
4236 		break;
4237 	}
4238 }
4239 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * Go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp (used by peeloff and
	 * accept).  Each moved mbuf is uncharged from the old socket's
	 * receive buffer and charged to the new one.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* serialize against concurrent readers of the old socket buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off everything destined for our target stcb */
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* uncharge each mbuf from the old socket's rcv buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge each mbuf to the new socket's rcv buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4321 
4322 void
4323 sctp_add_to_readq(struct sctp_inpcb *inp,
4324     struct sctp_tcb *stcb,
4325     struct sctp_queued_to_read *control,
4326     struct sockbuf *sb,
4327     int end,
4328     int so_locked
4329 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4330     SCTP_UNUSED
4331 #endif
4332 )
4333 {
4334 	/*
4335 	 * Here we must place the control on the end of the socket read
4336 	 * queue AND increment sb_cc so that select will work properly on
4337 	 * read.
4338 	 */
4339 	struct mbuf *m, *prev = NULL;
4340 
4341 	if (inp == NULL) {
4342 		/* Gak, TSNH!! */
4343 #ifdef INVARIANTS
4344 		panic("Gak, inp NULL on add_to_readq");
4345 #endif
4346 		return;
4347 	}
4348 	SCTP_INP_READ_LOCK(inp);
4349 	if (!(control->spec_flags & M_NOTIFICATION)) {
4350 		atomic_add_int(&inp->total_recvs, 1);
4351 		if (!control->do_not_ref_stcb) {
4352 			atomic_add_int(&stcb->total_recvs, 1);
4353 		}
4354 	}
4355 	m = control->data;
4356 	control->held_length = 0;
4357 	control->length = 0;
4358 	while (m) {
4359 		if (SCTP_BUF_LEN(m) == 0) {
4360 			/* Skip mbufs with NO length */
4361 			if (prev == NULL) {
4362 				/* First one */
4363 				control->data = sctp_m_free(m);
4364 				m = control->data;
4365 			} else {
4366 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4367 				m = SCTP_BUF_NEXT(prev);
4368 			}
4369 			if (m == NULL) {
4370 				control->tail_mbuf = prev;;
4371 			}
4372 			continue;
4373 		}
4374 		prev = m;
4375 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4376 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4377 		}
4378 		sctp_sballoc(stcb, sb, m);
4379 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4380 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4381 		}
4382 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4383 		m = SCTP_BUF_NEXT(m);
4384 	}
4385 	if (prev != NULL) {
4386 		control->tail_mbuf = prev;
4387 	} else {
4388 		/* Everything got collapsed out?? */
4389 		return;
4390 	}
4391 	if (end) {
4392 		control->end_added = 1;
4393 	}
4394 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4395 	SCTP_INP_READ_UNLOCK(inp);
4396 	if (inp && inp->sctp_socket) {
4397 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4398 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4399 		} else {
4400 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4401 			struct socket *so;
4402 
4403 			so = SCTP_INP_SO(inp);
4404 			if (!so_locked) {
4405 				atomic_add_int(&stcb->asoc.refcnt, 1);
4406 				SCTP_TCB_UNLOCK(stcb);
4407 				SCTP_SOCKET_LOCK(so, 1);
4408 				SCTP_TCB_LOCK(stcb);
4409 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4410 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4411 					SCTP_SOCKET_UNLOCK(so, 1);
4412 					return;
4413 				}
4414 			}
4415 #endif
4416 			sctp_sorwakeup(inp, inp->sctp_socket);
4417 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4418 			if (!so_locked) {
4419 				SCTP_SOCKET_UNLOCK(so, 1);
4420 			}
4421 #endif
4422 		}
4423 	}
4424 }
4425 
4426 
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 *
	 * Returns 0 on success, -1 when control is NULL/already complete
	 * or m is empty.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with no length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		/* if a socket buffer is involved, charge the bytes to it */
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake up any reader waiting on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/* hold a ref across the lock-order dance */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4568 
4569 
4570 
4571 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4572  *************ALTERNATE ROUTING CODE
4573  */
4574 
4575 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4576  *************ALTERNATE ROUTING CODE
4577  */
4578 
4579 struct mbuf *
4580 sctp_generate_invmanparam(int err)
4581 {
4582 	/* Return a MBUF with a invalid mandatory parameter */
4583 	struct mbuf *m;
4584 
4585 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4586 	if (m) {
4587 		struct sctp_paramhdr *ph;
4588 
4589 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4590 		ph = mtod(m, struct sctp_paramhdr *);
4591 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4592 		ph->param_type = htons(err);
4593 	}
4594 	return (m);
4595 }
4596 
4597 #ifdef SCTP_MBCNT_LOGGING
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	/*
	 * Return the buffer space held by chunk tp1 (chk_cnt chunks worth)
	 * to the association's accounting: the outbound chunk count, the
	 * total output queue size and, for TCP-pool/TCP-type endpoints,
	 * the socket send-buffer byte count.
	 */
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero instead of letting the counter go below zero */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* same clamping for the socket send buffer on 1-to-1 style endpoints */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4629 
4630 #endif
4631 
4632 int
4633 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4634     int reason, int so_locked
4635 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4636     SCTP_UNUSED
4637 #endif
4638 )
4639 {
4640 	struct sctp_stream_out *strq;
4641 	struct sctp_tmit_chunk *chk = NULL;
4642 	struct sctp_stream_queue_pending *sp;
4643 	uint16_t stream = 0, seq = 0;
4644 	uint8_t foundeom = 0;
4645 	int ret_sz = 0;
4646 	int notdone;
4647 	int do_wakeup_routine = 0;
4648 
4649 	stream = tp1->rec.data.stream_number;
4650 	seq = tp1->rec.data.stream_seq;
4651 	do {
4652 		ret_sz += tp1->book_size;
4653 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4654 		if (tp1->data != NULL) {
4655 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4656 			struct socket *so;
4657 
4658 #endif
4659 			printf("Release PR-SCTP chunk tsn:%u flags:%x\n",
4660 			    tp1->rec.data.TSN_seq,
4661 			    (unsigned int)tp1->rec.data.rcv_flags);
4662 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4663 			sctp_flight_size_decrease(tp1);
4664 			sctp_total_flight_decrease(stcb, tp1);
4665 			stcb->asoc.peers_rwnd += tp1->send_size;
4666 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4667 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4668 			sctp_m_freem(tp1->data);
4669 			tp1->data = NULL;
4670 			do_wakeup_routine = 1;
4671 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4672 				stcb->asoc.sent_queue_cnt_removeable--;
4673 			}
4674 		}
4675 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4676 		    SCTP_DATA_NOT_FRAG) {
4677 			/* not frag'ed we ae done   */
4678 			notdone = 0;
4679 			foundeom = 1;
4680 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4681 			/* end of frag, we are done */
4682 			notdone = 0;
4683 			foundeom = 1;
4684 		} else {
4685 			/*
4686 			 * Its a begin or middle piece, we must mark all of
4687 			 * it
4688 			 */
4689 			notdone = 1;
4690 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4691 		}
4692 	} while (tp1 && notdone);
4693 	if (foundeom == 0) {
4694 		/*
4695 		 * The multi-part message was scattered across the send and
4696 		 * sent queue.
4697 		 */
4698 next_on_sent:
4699 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4700 		/*
4701 		 * recurse throught the send_queue too, starting at the
4702 		 * beginning.
4703 		 */
4704 		if ((tp1) &&
4705 		    (tp1->rec.data.stream_number == stream) &&
4706 		    (tp1->rec.data.stream_seq == seq)
4707 		    ) {
4708 			/*
4709 			 * save to chk in case we have some on stream out
4710 			 * queue. If so and we have an un-transmitted one we
4711 			 * don't have to fudge the TSN.
4712 			 */
4713 			chk = tp1;
4714 			ret_sz += tp1->book_size;
4715 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4716 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4717 			sctp_m_freem(tp1->data);
4718 			tp1->data = NULL;
4719 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4720 				foundeom = 1;
4721 			}
4722 			do_wakeup_routine = 1;
4723 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4724 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4725 			/*
4726 			 * on to the sent queue so we can wait for it to be
4727 			 * passed by.
4728 			 */
4729 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4730 			    sctp_next);
4731 			stcb->asoc.send_queue_cnt--;
4732 			stcb->asoc.sent_queue_cnt++;
4733 			goto next_on_sent;
4734 		}
4735 	}
4736 	if (foundeom == 0) {
4737 		/*
4738 		 * Still no eom found. That means there is stuff left on the
4739 		 * stream out queue.. yuck.
4740 		 */
4741 		strq = &stcb->asoc.strmout[stream];
4742 		SCTP_TCB_SEND_LOCK(stcb);
4743 		sp = TAILQ_FIRST(&strq->outqueue);
4744 		while (sp->strseq <= seq) {
4745 			/* Check if its our SEQ */
4746 			if (sp->strseq == seq) {
4747 				sp->discard_rest = 1;
4748 				/*
4749 				 * We may need to put a chunk on the queue
4750 				 * that holds the TSN that would have been
4751 				 * sent with the LAST bit.
4752 				 */
4753 				if (chk == NULL) {
4754 					/* Yep, we have to */
4755 					sctp_alloc_a_chunk(stcb, chk);
4756 					if (chk == NULL) {
4757 						/*
4758 						 * we are hosed. All we can
4759 						 * do is nothing.. which
4760 						 * will cause an abort if
4761 						 * the peer is paying
4762 						 * attention.
4763 						 */
4764 						goto oh_well;
4765 					}
4766 					memset(chk, 0, sizeof(*chk));
4767 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4768 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4769 					chk->asoc = &stcb->asoc;
4770 					chk->rec.data.stream_seq = sp->strseq;
4771 					chk->rec.data.stream_number = sp->stream;
4772 					chk->rec.data.payloadtype = sp->ppid;
4773 					chk->rec.data.context = sp->context;
4774 					chk->flags = sp->act_flags;
4775 					chk->addr_over = sp->addr_over;
4776 					chk->whoTo = sp->net;
4777 					atomic_add_int(&chk->whoTo->ref_count, 1);
4778 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4779 					stcb->asoc.pr_sctp_cnt++;
4780 					chk->pr_sctp_on = 1;
4781 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4782 					stcb->asoc.sent_queue_cnt++;
4783 				} else {
4784 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4785 				}
4786 		oh_well:
4787 				if (sp->data) {
4788 					/*
4789 					 * Pull any data to free up the SB
4790 					 * and allow sender to "add more"
4791 					 * whilc we will throw away :-)
4792 					 */
4793 					sctp_free_spbufspace(stcb, &stcb->asoc,
4794 					    sp);
4795 					ret_sz += sp->length;
4796 					do_wakeup_routine = 1;
4797 					sp->some_taken = 1;
4798 					sctp_m_freem(sp->data);
4799 					sp->length = 0;
4800 					sp->data = NULL;
4801 					sp->tail_mbuf = NULL;
4802 				}
4803 				break;
4804 			} else {
4805 				/* Next one please */
4806 				sp = TAILQ_NEXT(sp, next);
4807 			}
4808 		}		/* End while */
4809 		SCTP_TCB_SEND_UNLOCK(stcb);
4810 	}
4811 	if (do_wakeup_routine) {
4812 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4813 		so = SCTP_INP_SO(stcb->sctp_ep);
4814 		if (!so_locked) {
4815 			atomic_add_int(&stcb->asoc.refcnt, 1);
4816 			SCTP_TCB_UNLOCK(stcb);
4817 			SCTP_SOCKET_LOCK(so, 1);
4818 			SCTP_TCB_LOCK(stcb);
4819 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4820 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4821 				/* assoc was freed while we were unlocked */
4822 				SCTP_SOCKET_UNLOCK(so, 1);
4823 				return (ret_sz);
4824 			}
4825 		}
4826 #endif
4827 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4828 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4829 		if (!so_locked) {
4830 			SCTP_SOCKET_UNLOCK(so, 1);
4831 		}
4832 #endif
4833 	}
4834 	return (ret_sz);
4835 }
4836 
4837 /*
4838  * checks to see if the given address, sa, is one that is currently known by
4839  * the kernel note: can't distinguish the same address on multiple interfaces
4840  * and doesn't handle multiple addresses with different zone/scope id's note:
4841  * ifa_ifwithaddr() compares the entire sockaddr struct
4842  */
4843 struct sctp_ifa *
4844 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4845     int holds_lock)
4846 {
4847 	struct sctp_laddr *laddr;
4848 
4849 	if (holds_lock == 0) {
4850 		SCTP_INP_RLOCK(inp);
4851 	}
4852 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4853 		if (laddr->ifa == NULL)
4854 			continue;
4855 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4856 			continue;
4857 		if (addr->sa_family == AF_INET) {
4858 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4859 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4860 				/* found him. */
4861 				if (holds_lock == 0) {
4862 					SCTP_INP_RUNLOCK(inp);
4863 				}
4864 				return (laddr->ifa);
4865 				break;
4866 			}
4867 		}
4868 #ifdef INET6
4869 		if (addr->sa_family == AF_INET6) {
4870 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4871 			    &laddr->ifa->address.sin6)) {
4872 				/* found him. */
4873 				if (holds_lock == 0) {
4874 					SCTP_INP_RUNLOCK(inp);
4875 				}
4876 				return (laddr->ifa);
4877 				break;
4878 			}
4879 		}
4880 #endif
4881 	}
4882 	if (holds_lock == 0) {
4883 		SCTP_INP_RUNLOCK(inp);
4884 	}
4885 	return (NULL);
4886 }
4887 
4888 uint32_t
4889 sctp_get_ifa_hash_val(struct sockaddr *addr)
4890 {
4891 	if (addr->sa_family == AF_INET) {
4892 		struct sockaddr_in *sin;
4893 
4894 		sin = (struct sockaddr_in *)addr;
4895 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4896 	} else if (addr->sa_family == AF_INET6) {
4897 		struct sockaddr_in6 *sin6;
4898 		uint32_t hash_of_addr;
4899 
4900 		sin6 = (struct sockaddr_in6 *)addr;
4901 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4902 		    sin6->sin6_addr.s6_addr32[1] +
4903 		    sin6->sin6_addr.s6_addr32[2] +
4904 		    sin6->sin6_addr.s6_addr32[3]);
4905 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4906 		return (hash_of_addr);
4907 	}
4908 	return (0);
4909 }
4910 
4911 struct sctp_ifa *
4912 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4913 {
4914 	struct sctp_ifa *sctp_ifap;
4915 	struct sctp_vrf *vrf;
4916 	struct sctp_ifalist *hash_head;
4917 	uint32_t hash_of_addr;
4918 
4919 	if (holds_lock == 0)
4920 		SCTP_IPI_ADDR_RLOCK();
4921 
4922 	vrf = sctp_find_vrf(vrf_id);
4923 	if (vrf == NULL) {
4924 stage_right:
4925 		if (holds_lock == 0)
4926 			SCTP_IPI_ADDR_RUNLOCK();
4927 		return (NULL);
4928 	}
4929 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4930 
4931 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4932 	if (hash_head == NULL) {
4933 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4934 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4935 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4936 		sctp_print_address(addr);
4937 		SCTP_PRINTF("No such bucket for address\n");
4938 		if (holds_lock == 0)
4939 			SCTP_IPI_ADDR_RUNLOCK();
4940 
4941 		return (NULL);
4942 	}
4943 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4944 		if (sctp_ifap == NULL) {
4945 #ifdef INVARIANTS
4946 			panic("Huh LIST_FOREACH corrupt");
4947 			goto stage_right;
4948 #else
4949 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4950 			goto stage_right;
4951 #endif
4952 		}
4953 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4954 			continue;
4955 		if (addr->sa_family == AF_INET) {
4956 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4957 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4958 				/* found him. */
4959 				if (holds_lock == 0)
4960 					SCTP_IPI_ADDR_RUNLOCK();
4961 				return (sctp_ifap);
4962 				break;
4963 			}
4964 		}
4965 #ifdef INET6
4966 		if (addr->sa_family == AF_INET6) {
4967 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4968 			    &sctp_ifap->address.sin6)) {
4969 				/* found him. */
4970 				if (holds_lock == 0)
4971 					SCTP_IPI_ADDR_RUNLOCK();
4972 				return (sctp_ifap);
4973 				break;
4974 			}
4975 		}
4976 #endif
4977 	}
4978 	if (holds_lock == 0)
4979 		SCTP_IPI_ADDR_RUNLOCK();
4980 	return (NULL);
4981 }
4982 
/*
 * Called after the application has pulled *freed_so_far bytes out of the
 * receive path.  If the receive window has opened by at least rwnd_req
 * bytes compared to the last value reported to the peer, send a
 * window-update SACK (or NR-SACK) immediately; otherwise just record the
 * pending amount in stcb->freed_by_sorcv_sincelast for next time.
 *
 * stcb         - association; protected with a temporary refcnt bump for
 *                the duration of the call.
 * freed_so_far - in/out: bytes freed since the last update; zeroed here
 *                once accounted on the association.
 * hold_rlock   - non-zero if the caller holds the inp read-queue lock;
 *                it is dropped around the SACK send and re-taken on exit.
 * rwnd_req     - threshold of newly opened window that triggers a SACK.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the association cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		/* Socket is going away; no window update needed. */
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Account the newly freed bytes on the association. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Enough window opened: tell the peer with a SACK. */
		if (hold_rlock) {
			/* Drop the read-queue lock while we send. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock; the state may have changed. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		/*
		 * EY if nr_sacks used then send an nr-sack , a sack
		 * otherwise
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
			sctp_send_nr_sack(stcb);
		else
			sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-take the read-queue lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Release our temporary hold on the association. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5071 
5072 int
5073 sctp_sorecvmsg(struct socket *so,
5074     struct uio *uio,
5075     struct mbuf **mp,
5076     struct sockaddr *from,
5077     int fromlen,
5078     int *msg_flags,
5079     struct sctp_sndrcvinfo *sinfo,
5080     int filling_sinfo)
5081 {
5082 	/*
5083 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5084 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5085 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5086 	 * On the way out we may send out any combination of:
5087 	 * MSG_NOTIFICATION MSG_EOR
5088 	 *
5089 	 */
5090 	struct sctp_inpcb *inp = NULL;
5091 	int my_len = 0;
5092 	int cp_len = 0, error = 0;
5093 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5094 	struct mbuf *m = NULL, *embuf = NULL;
5095 	struct sctp_tcb *stcb = NULL;
5096 	int wakeup_read_socket = 0;
5097 	int freecnt_applied = 0;
5098 	int out_flags = 0, in_flags = 0;
5099 	int block_allowed = 1;
5100 	uint32_t freed_so_far = 0;
5101 	uint32_t copied_so_far = 0;
5102 	int in_eeor_mode = 0;
5103 	int no_rcv_needed = 0;
5104 	uint32_t rwnd_req = 0;
5105 	int hold_sblock = 0;
5106 	int hold_rlock = 0;
5107 	int slen = 0;
5108 	uint32_t held_length = 0;
5109 	int sockbuf_lock = 0;
5110 
5111 	if (uio == NULL) {
5112 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5113 		return (EINVAL);
5114 	}
5115 	if (msg_flags) {
5116 		in_flags = *msg_flags;
5117 		if (in_flags & MSG_PEEK)
5118 			SCTP_STAT_INCR(sctps_read_peeks);
5119 	} else {
5120 		in_flags = 0;
5121 	}
5122 	slen = uio->uio_resid;
5123 
5124 	/* Pull in and set up our int flags */
5125 	if (in_flags & MSG_OOB) {
5126 		/* Out of band's NOT supported */
5127 		return (EOPNOTSUPP);
5128 	}
5129 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5130 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5131 		return (EINVAL);
5132 	}
5133 	if ((in_flags & (MSG_DONTWAIT
5134 	    | MSG_NBIO
5135 	    )) ||
5136 	    SCTP_SO_IS_NBIO(so)) {
5137 		block_allowed = 0;
5138 	}
5139 	/* setup the endpoint */
5140 	inp = (struct sctp_inpcb *)so->so_pcb;
5141 	if (inp == NULL) {
5142 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5143 		return (EFAULT);
5144 	}
5145 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5146 	/* Must be at least a MTU's worth */
5147 	if (rwnd_req < SCTP_MIN_RWND)
5148 		rwnd_req = SCTP_MIN_RWND;
5149 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5150 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5151 		sctp_misc_ints(SCTP_SORECV_ENTER,
5152 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5153 	}
5154 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5155 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5156 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5157 	}
5158 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5159 	sockbuf_lock = 1;
5160 	if (error) {
5161 		goto release_unlocked;
5162 	}
5163 restart:
5164 
5165 
5166 restart_nosblocks:
5167 	if (hold_sblock == 0) {
5168 		SOCKBUF_LOCK(&so->so_rcv);
5169 		hold_sblock = 1;
5170 	}
5171 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5172 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5173 		goto out;
5174 	}
5175 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5176 		if (so->so_error) {
5177 			error = so->so_error;
5178 			if ((in_flags & MSG_PEEK) == 0)
5179 				so->so_error = 0;
5180 			goto out;
5181 		} else {
5182 			if (so->so_rcv.sb_cc == 0) {
5183 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5184 				/* indicate EOF */
5185 				error = 0;
5186 				goto out;
5187 			}
5188 		}
5189 	}
5190 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5191 		/* we need to wait for data */
5192 		if ((so->so_rcv.sb_cc == 0) &&
5193 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5194 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5195 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5196 				/*
5197 				 * For active open side clear flags for
5198 				 * re-use passive open is blocked by
5199 				 * connect.
5200 				 */
5201 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5202 					/*
5203 					 * You were aborted, passive side
5204 					 * always hits here
5205 					 */
5206 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5207 					error = ECONNRESET;
5208 					/*
5209 					 * You get this once if you are
5210 					 * active open side
5211 					 */
5212 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5213 						/*
5214 						 * Remove flag if on the
5215 						 * active open side
5216 						 */
5217 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5218 					}
5219 				}
5220 				so->so_state &= ~(SS_ISCONNECTING |
5221 				    SS_ISDISCONNECTING |
5222 				    SS_ISCONFIRMING |
5223 				    SS_ISCONNECTED);
5224 				if (error == 0) {
5225 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5226 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5227 						error = ENOTCONN;
5228 					} else {
5229 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5230 					}
5231 				}
5232 				goto out;
5233 			}
5234 		}
5235 		error = sbwait(&so->so_rcv);
5236 		if (error) {
5237 			goto out;
5238 		}
5239 		held_length = 0;
5240 		goto restart_nosblocks;
5241 	} else if (so->so_rcv.sb_cc == 0) {
5242 		if (so->so_error) {
5243 			error = so->so_error;
5244 			if ((in_flags & MSG_PEEK) == 0)
5245 				so->so_error = 0;
5246 		} else {
5247 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5248 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5249 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5250 					/*
5251 					 * For active open side clear flags
5252 					 * for re-use passive open is
5253 					 * blocked by connect.
5254 					 */
5255 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5256 						/*
5257 						 * You were aborted, passive
5258 						 * side always hits here
5259 						 */
5260 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5261 						error = ECONNRESET;
5262 						/*
5263 						 * You get this once if you
5264 						 * are active open side
5265 						 */
5266 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5267 							/*
5268 							 * Remove flag if on
5269 							 * the active open
5270 							 * side
5271 							 */
5272 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5273 						}
5274 					}
5275 					so->so_state &= ~(SS_ISCONNECTING |
5276 					    SS_ISDISCONNECTING |
5277 					    SS_ISCONFIRMING |
5278 					    SS_ISCONNECTED);
5279 					if (error == 0) {
5280 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5281 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5282 							error = ENOTCONN;
5283 						} else {
5284 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5285 						}
5286 					}
5287 					goto out;
5288 				}
5289 			}
5290 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5291 			error = EWOULDBLOCK;
5292 		}
5293 		goto out;
5294 	}
5295 	if (hold_sblock == 1) {
5296 		SOCKBUF_UNLOCK(&so->so_rcv);
5297 		hold_sblock = 0;
5298 	}
5299 	/* we possibly have data we can read */
5300 	/* sa_ignore FREED_MEMORY */
5301 	control = TAILQ_FIRST(&inp->read_queue);
5302 	if (control == NULL) {
5303 		/*
5304 		 * This could be happening since the appender did the
5305 		 * increment but as not yet did the tailq insert onto the
5306 		 * read_queue
5307 		 */
5308 		if (hold_rlock == 0) {
5309 			SCTP_INP_READ_LOCK(inp);
5310 			hold_rlock = 1;
5311 		}
5312 		control = TAILQ_FIRST(&inp->read_queue);
5313 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5314 #ifdef INVARIANTS
5315 			panic("Huh, its non zero and nothing on control?");
5316 #endif
5317 			so->so_rcv.sb_cc = 0;
5318 		}
5319 		SCTP_INP_READ_UNLOCK(inp);
5320 		hold_rlock = 0;
5321 		goto restart;
5322 	}
5323 	if ((control->length == 0) &&
5324 	    (control->do_not_ref_stcb)) {
5325 		/*
5326 		 * Clean up code for freeing assoc that left behind a
5327 		 * pdapi.. maybe a peer in EEOR that just closed after
5328 		 * sending and never indicated a EOR.
5329 		 */
5330 		if (hold_rlock == 0) {
5331 			hold_rlock = 1;
5332 			SCTP_INP_READ_LOCK(inp);
5333 		}
5334 		control->held_length = 0;
5335 		if (control->data) {
5336 			/* Hmm there is data here .. fix */
5337 			struct mbuf *m_tmp;
5338 			int cnt = 0;
5339 
5340 			m_tmp = control->data;
5341 			while (m_tmp) {
5342 				cnt += SCTP_BUF_LEN(m_tmp);
5343 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5344 					control->tail_mbuf = m_tmp;
5345 					control->end_added = 1;
5346 				}
5347 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5348 			}
5349 			control->length = cnt;
5350 		} else {
5351 			/* remove it */
5352 			TAILQ_REMOVE(&inp->read_queue, control, next);
5353 			/* Add back any hiddend data */
5354 			sctp_free_remote_addr(control->whoFrom);
5355 			sctp_free_a_readq(stcb, control);
5356 		}
5357 		if (hold_rlock) {
5358 			hold_rlock = 0;
5359 			SCTP_INP_READ_UNLOCK(inp);
5360 		}
5361 		goto restart;
5362 	}
5363 	if (control->length == 0) {
5364 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5365 		    (filling_sinfo)) {
5366 			/* find a more suitable one then this */
5367 			ctl = TAILQ_NEXT(control, next);
5368 			while (ctl) {
5369 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5370 				    (ctl->some_taken ||
5371 				    (ctl->spec_flags & M_NOTIFICATION) ||
5372 				    ((ctl->do_not_ref_stcb == 0) &&
5373 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5374 				    ) {
5375 					/*-
5376 					 * If we have a different TCB next, and there is data
5377 					 * present. If we have already taken some (pdapi), OR we can
5378 					 * ref the tcb and no delivery as started on this stream, we
5379 					 * take it. Note we allow a notification on a different
5380 					 * assoc to be delivered..
5381 					 */
5382 					control = ctl;
5383 					goto found_one;
5384 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5385 					    (ctl->length) &&
5386 					    ((ctl->some_taken) ||
5387 					    ((ctl->do_not_ref_stcb == 0) &&
5388 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5389 					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5390 				    ) {
5391 					/*-
5392 					 * If we have the same tcb, and there is data present, and we
5393 					 * have the strm interleave feature present. Then if we have
5394 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5395 					 * not started a delivery for this stream, we can take it.
5396 					 * Note we do NOT allow a notificaiton on the same assoc to
5397 					 * be delivered.
5398 					 */
5399 					control = ctl;
5400 					goto found_one;
5401 				}
5402 				ctl = TAILQ_NEXT(ctl, next);
5403 			}
5404 		}
5405 		/*
5406 		 * if we reach here, not suitable replacement is available
5407 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5408 		 * into the our held count, and its time to sleep again.
5409 		 */
5410 		held_length = so->so_rcv.sb_cc;
5411 		control->held_length = so->so_rcv.sb_cc;
5412 		goto restart;
5413 	}
5414 	/* Clear the held length since there is something to read */
5415 	control->held_length = 0;
5416 	if (hold_rlock) {
5417 		SCTP_INP_READ_UNLOCK(inp);
5418 		hold_rlock = 0;
5419 	}
5420 found_one:
5421 	/*
5422 	 * If we reach here, control has a some data for us to read off.
5423 	 * Note that stcb COULD be NULL.
5424 	 */
5425 	control->some_taken++;
5426 	if (hold_sblock) {
5427 		SOCKBUF_UNLOCK(&so->so_rcv);
5428 		hold_sblock = 0;
5429 	}
5430 	stcb = control->stcb;
5431 	if (stcb) {
5432 		if ((control->do_not_ref_stcb == 0) &&
5433 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5434 			if (freecnt_applied == 0)
5435 				stcb = NULL;
5436 		} else if (control->do_not_ref_stcb == 0) {
5437 			/* you can't free it on me please */
5438 			/*
5439 			 * The lock on the socket buffer protects us so the
5440 			 * free code will stop. But since we used the
5441 			 * socketbuf lock and the sender uses the tcb_lock
5442 			 * to increment, we need to use the atomic add to
5443 			 * the refcnt
5444 			 */
5445 			if (freecnt_applied) {
5446 #ifdef INVARIANTS
5447 				panic("refcnt already incremented");
5448 #else
5449 				printf("refcnt already incremented?\n");
5450 #endif
5451 			} else {
5452 				atomic_add_int(&stcb->asoc.refcnt, 1);
5453 				freecnt_applied = 1;
5454 			}
5455 			/*
5456 			 * Setup to remember how much we have not yet told
5457 			 * the peer our rwnd has opened up. Note we grab the
5458 			 * value from the tcb from last time. Note too that
5459 			 * sack sending clears this when a sack is sent,
5460 			 * which is fine. Once we hit the rwnd_req, we then
5461 			 * will go to the sctp_user_rcvd() that will not
5462 			 * lock until it KNOWs it MUST send a WUP-SACK.
5463 			 */
5464 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5465 			stcb->freed_by_sorcv_sincelast = 0;
5466 		}
5467 	}
5468 	if (stcb &&
5469 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5470 	    control->do_not_ref_stcb == 0) {
5471 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5472 	}
5473 	/* First lets get off the sinfo and sockaddr info */
5474 	if ((sinfo) && filling_sinfo) {
5475 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5476 		nxt = TAILQ_NEXT(control, next);
5477 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5478 			struct sctp_extrcvinfo *s_extra;
5479 
5480 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5481 			if ((nxt) &&
5482 			    (nxt->length)) {
5483 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5484 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5485 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5486 				}
5487 				if (nxt->spec_flags & M_NOTIFICATION) {
5488 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5489 				}
5490 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5491 				s_extra->sreinfo_next_length = nxt->length;
5492 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5493 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5494 				if (nxt->tail_mbuf != NULL) {
5495 					if (nxt->end_added) {
5496 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5497 					}
5498 				}
5499 			} else {
5500 				/*
5501 				 * we explicitly 0 this, since the memcpy
5502 				 * got some other things beyond the older
5503 				 * sinfo_ that is on the control's structure
5504 				 * :-D
5505 				 */
5506 				nxt = NULL;
5507 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5508 				s_extra->sreinfo_next_aid = 0;
5509 				s_extra->sreinfo_next_length = 0;
5510 				s_extra->sreinfo_next_ppid = 0;
5511 				s_extra->sreinfo_next_stream = 0;
5512 			}
5513 		}
5514 		/*
5515 		 * update off the real current cum-ack, if we have an stcb.
5516 		 */
5517 		if ((control->do_not_ref_stcb == 0) && stcb)
5518 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5519 		/*
5520 		 * mask off the high bits, we keep the actual chunk bits in
5521 		 * there.
5522 		 */
5523 		sinfo->sinfo_flags &= 0x00ff;
5524 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5525 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5526 		}
5527 	}
5528 #ifdef SCTP_ASOCLOG_OF_TSNS
5529 	{
5530 		int index, newindex;
5531 		struct sctp_pcbtsn_rlog *entry;
5532 
5533 		do {
5534 			index = inp->readlog_index;
5535 			newindex = index + 1;
5536 			if (newindex >= SCTP_READ_LOG_SIZE) {
5537 				newindex = 0;
5538 			}
5539 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5540 		entry = &inp->readlog[index];
5541 		entry->vtag = control->sinfo_assoc_id;
5542 		entry->strm = control->sinfo_stream;
5543 		entry->seq = control->sinfo_ssn;
5544 		entry->sz = control->length;
5545 		entry->flgs = control->sinfo_flags;
5546 	}
5547 #endif
5548 	if (fromlen && from) {
5549 		struct sockaddr *to;
5550 
5551 #ifdef INET
5552 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5553 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5554 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5555 #else
5556 		/* No AF_INET use AF_INET6 */
5557 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5558 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5559 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5560 #endif
5561 
5562 		to = from;
5563 #if defined(INET) && defined(INET6)
5564 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5565 		    (to->sa_family == AF_INET) &&
5566 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5567 			struct sockaddr_in *sin;
5568 			struct sockaddr_in6 sin6;
5569 
5570 			sin = (struct sockaddr_in *)to;
5571 			bzero(&sin6, sizeof(sin6));
5572 			sin6.sin6_family = AF_INET6;
5573 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5574 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5575 			bcopy(&sin->sin_addr,
5576 			    &sin6.sin6_addr.s6_addr32[3],
5577 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5578 			sin6.sin6_port = sin->sin_port;
5579 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5580 		}
5581 #endif
5582 #if defined(INET6)
5583 		{
5584 			struct sockaddr_in6 lsa6, *to6;
5585 
5586 			to6 = (struct sockaddr_in6 *)to;
5587 			sctp_recover_scope_mac(to6, (&lsa6));
5588 		}
5589 #endif
5590 	}
5591 	/* now copy out what data we can */
5592 	if (mp == NULL) {
5593 		/* copy out each mbuf in the chain up to length */
5594 get_more_data:
5595 		m = control->data;
5596 		while (m) {
5597 			/* Move out all we can */
5598 			cp_len = (int)uio->uio_resid;
5599 			my_len = (int)SCTP_BUF_LEN(m);
5600 			if (cp_len > my_len) {
5601 				/* not enough in this buf */
5602 				cp_len = my_len;
5603 			}
5604 			if (hold_rlock) {
5605 				SCTP_INP_READ_UNLOCK(inp);
5606 				hold_rlock = 0;
5607 			}
5608 			if (cp_len > 0)
5609 				error = uiomove(mtod(m, char *), cp_len, uio);
5610 			/* re-read */
5611 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5612 				goto release;
5613 			}
5614 			if ((control->do_not_ref_stcb == 0) && stcb &&
5615 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5616 				no_rcv_needed = 1;
5617 			}
5618 			if (error) {
5619 				/* error we are out of here */
5620 				goto release;
5621 			}
5622 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5623 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5624 			    ((control->end_added == 0) ||
5625 			    (control->end_added &&
5626 			    (TAILQ_NEXT(control, next) == NULL)))
5627 			    ) {
5628 				SCTP_INP_READ_LOCK(inp);
5629 				hold_rlock = 1;
5630 			}
5631 			if (cp_len == SCTP_BUF_LEN(m)) {
5632 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5633 				    (control->end_added)) {
5634 					out_flags |= MSG_EOR;
5635 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5636 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5637 				}
5638 				if (control->spec_flags & M_NOTIFICATION) {
5639 					out_flags |= MSG_NOTIFICATION;
5640 				}
5641 				/* we ate up the mbuf */
5642 				if (in_flags & MSG_PEEK) {
5643 					/* just looking */
5644 					m = SCTP_BUF_NEXT(m);
5645 					copied_so_far += cp_len;
5646 				} else {
5647 					/* dispose of the mbuf */
5648 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5649 						sctp_sblog(&so->so_rcv,
5650 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5651 					}
5652 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5653 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5654 						sctp_sblog(&so->so_rcv,
5655 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5656 					}
5657 					embuf = m;
5658 					copied_so_far += cp_len;
5659 					freed_so_far += cp_len;
5660 					freed_so_far += MSIZE;
5661 					atomic_subtract_int(&control->length, cp_len);
5662 					control->data = sctp_m_free(m);
5663 					m = control->data;
5664 					/*
5665 					 * been through it all, must hold sb
5666 					 * lock ok to null tail
5667 					 */
5668 					if (control->data == NULL) {
5669 #ifdef INVARIANTS
5670 						if ((control->end_added == 0) ||
5671 						    (TAILQ_NEXT(control, next) == NULL)) {
5672 							/*
5673 							 * If the end is not
5674 							 * added, OR the
5675 							 * next is NOT null
5676 							 * we MUST have the
5677 							 * lock.
5678 							 */
5679 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5680 								panic("Hmm we don't own the lock?");
5681 							}
5682 						}
5683 #endif
5684 						control->tail_mbuf = NULL;
5685 #ifdef INVARIANTS
5686 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5687 							panic("end_added, nothing left and no MSG_EOR");
5688 						}
5689 #endif
5690 					}
5691 				}
5692 			} else {
5693 				/* Do we need to trim the mbuf? */
5694 				if (control->spec_flags & M_NOTIFICATION) {
5695 					out_flags |= MSG_NOTIFICATION;
5696 				}
5697 				if ((in_flags & MSG_PEEK) == 0) {
5698 					SCTP_BUF_RESV_UF(m, cp_len);
5699 					SCTP_BUF_LEN(m) -= cp_len;
5700 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5701 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5702 					}
5703 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5704 					if ((control->do_not_ref_stcb == 0) &&
5705 					    stcb) {
5706 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5707 					}
5708 					copied_so_far += cp_len;
5709 					embuf = m;
5710 					freed_so_far += cp_len;
5711 					freed_so_far += MSIZE;
5712 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5713 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5714 						    SCTP_LOG_SBRESULT, 0);
5715 					}
5716 					atomic_subtract_int(&control->length, cp_len);
5717 				} else {
5718 					copied_so_far += cp_len;
5719 				}
5720 			}
5721 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5722 				break;
5723 			}
5724 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5725 			    (control->do_not_ref_stcb == 0) &&
5726 			    (freed_so_far >= rwnd_req)) {
5727 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5728 			}
5729 		}		/* end while(m) */
5730 		/*
5731 		 * At this point we have looked at it all and we either have
5732 		 * a MSG_EOR/or read all the user wants... <OR>
5733 		 * control->length == 0.
5734 		 */
5735 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5736 			/* we are done with this control */
5737 			if (control->length == 0) {
5738 				if (control->data) {
5739 #ifdef INVARIANTS
5740 					panic("control->data not null at read eor?");
5741 #else
5742 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5743 					sctp_m_freem(control->data);
5744 					control->data = NULL;
5745 #endif
5746 				}
5747 		done_with_control:
5748 				if (TAILQ_NEXT(control, next) == NULL) {
5749 					/*
5750 					 * If we don't have a next we need a
5751 					 * lock, if there is a next
5752 					 * interrupt is filling ahead of us
5753 					 * and we don't need a lock to
5754 					 * remove this guy (which is the
5755 					 * head of the queue).
5756 					 */
5757 					if (hold_rlock == 0) {
5758 						SCTP_INP_READ_LOCK(inp);
5759 						hold_rlock = 1;
5760 					}
5761 				}
5762 				TAILQ_REMOVE(&inp->read_queue, control, next);
5763 				/* Add back any hiddend data */
5764 				if (control->held_length) {
5765 					held_length = 0;
5766 					control->held_length = 0;
5767 					wakeup_read_socket = 1;
5768 				}
5769 				if (control->aux_data) {
5770 					sctp_m_free(control->aux_data);
5771 					control->aux_data = NULL;
5772 				}
5773 				no_rcv_needed = control->do_not_ref_stcb;
5774 				sctp_free_remote_addr(control->whoFrom);
5775 				control->data = NULL;
5776 				sctp_free_a_readq(stcb, control);
5777 				control = NULL;
5778 				if ((freed_so_far >= rwnd_req) &&
5779 				    (no_rcv_needed == 0))
5780 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5781 
5782 			} else {
5783 				/*
5784 				 * The user did not read all of this
5785 				 * message, turn off the returned MSG_EOR
5786 				 * since we are leaving more behind on the
5787 				 * control to read.
5788 				 */
5789 #ifdef INVARIANTS
5790 				if (control->end_added &&
5791 				    (control->data == NULL) &&
5792 				    (control->tail_mbuf == NULL)) {
5793 					panic("Gak, control->length is corrupt?");
5794 				}
5795 #endif
5796 				no_rcv_needed = control->do_not_ref_stcb;
5797 				out_flags &= ~MSG_EOR;
5798 			}
5799 		}
5800 		if (out_flags & MSG_EOR) {
5801 			goto release;
5802 		}
5803 		if ((uio->uio_resid == 0) ||
5804 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5805 		    ) {
5806 			goto release;
5807 		}
5808 		/*
5809 		 * If I hit here the receiver wants more and this message is
5810 		 * NOT done (pd-api). So two questions. Can we block? if not
5811 		 * we are done. Did the user NOT set MSG_WAITALL?
5812 		 */
5813 		if (block_allowed == 0) {
5814 			goto release;
5815 		}
5816 		/*
5817 		 * We need to wait for more data a few things: - We don't
5818 		 * sbunlock() so we don't get someone else reading. - We
5819 		 * must be sure to account for the case where what is added
5820 		 * is NOT to our control when we wakeup.
5821 		 */
5822 
5823 		/*
5824 		 * Do we need to tell the transport a rwnd update might be
5825 		 * needed before we go to sleep?
5826 		 */
5827 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5828 		    ((freed_so_far >= rwnd_req) &&
5829 		    (control->do_not_ref_stcb == 0) &&
5830 		    (no_rcv_needed == 0))) {
5831 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5832 		}
5833 wait_some_more:
5834 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5835 			goto release;
5836 		}
5837 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5838 			goto release;
5839 
5840 		if (hold_rlock == 1) {
5841 			SCTP_INP_READ_UNLOCK(inp);
5842 			hold_rlock = 0;
5843 		}
5844 		if (hold_sblock == 0) {
5845 			SOCKBUF_LOCK(&so->so_rcv);
5846 			hold_sblock = 1;
5847 		}
5848 		if ((copied_so_far) && (control->length == 0) &&
5849 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
5850 		    ) {
5851 			goto release;
5852 		}
5853 		if (so->so_rcv.sb_cc <= control->held_length) {
5854 			error = sbwait(&so->so_rcv);
5855 			if (error) {
5856 				goto release;
5857 			}
5858 			control->held_length = 0;
5859 		}
5860 		if (hold_sblock) {
5861 			SOCKBUF_UNLOCK(&so->so_rcv);
5862 			hold_sblock = 0;
5863 		}
5864 		if (control->length == 0) {
5865 			/* still nothing here */
5866 			if (control->end_added == 1) {
5867 				/* he aborted, or is done i.e.did a shutdown */
5868 				out_flags |= MSG_EOR;
5869 				if (control->pdapi_aborted) {
5870 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5871 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5872 
5873 					out_flags |= MSG_TRUNC;
5874 				} else {
5875 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5876 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5877 				}
5878 				goto done_with_control;
5879 			}
5880 			if (so->so_rcv.sb_cc > held_length) {
5881 				control->held_length = so->so_rcv.sb_cc;
5882 				held_length = 0;
5883 			}
5884 			goto wait_some_more;
5885 		} else if (control->data == NULL) {
5886 			/*
5887 			 * we must re-sync since data is probably being
5888 			 * added
5889 			 */
5890 			SCTP_INP_READ_LOCK(inp);
5891 			if ((control->length > 0) && (control->data == NULL)) {
5892 				/*
5893 				 * big trouble.. we have the lock and its
5894 				 * corrupt?
5895 				 */
5896 #ifdef INVARIANTS
5897 				panic("Impossible data==NULL length !=0");
5898 #endif
5899 				out_flags |= MSG_EOR;
5900 				out_flags |= MSG_TRUNC;
5901 				control->length = 0;
5902 				SCTP_INP_READ_UNLOCK(inp);
5903 				goto done_with_control;
5904 			}
5905 			SCTP_INP_READ_UNLOCK(inp);
5906 			/* We will fall around to get more data */
5907 		}
5908 		goto get_more_data;
5909 	} else {
5910 		/*-
5911 		 * Give caller back the mbuf chain,
5912 		 * store in uio_resid the length
5913 		 */
5914 		wakeup_read_socket = 0;
5915 		if ((control->end_added == 0) ||
5916 		    (TAILQ_NEXT(control, next) == NULL)) {
5917 			/* Need to get rlock */
5918 			if (hold_rlock == 0) {
5919 				SCTP_INP_READ_LOCK(inp);
5920 				hold_rlock = 1;
5921 			}
5922 		}
5923 		if (control->end_added) {
5924 			out_flags |= MSG_EOR;
5925 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5926 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5927 		}
5928 		if (control->spec_flags & M_NOTIFICATION) {
5929 			out_flags |= MSG_NOTIFICATION;
5930 		}
5931 		uio->uio_resid = control->length;
5932 		*mp = control->data;
5933 		m = control->data;
5934 		while (m) {
5935 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5936 				sctp_sblog(&so->so_rcv,
5937 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5938 			}
5939 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5940 			freed_so_far += SCTP_BUF_LEN(m);
5941 			freed_so_far += MSIZE;
5942 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5943 				sctp_sblog(&so->so_rcv,
5944 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5945 			}
5946 			m = SCTP_BUF_NEXT(m);
5947 		}
5948 		control->data = control->tail_mbuf = NULL;
5949 		control->length = 0;
5950 		if (out_flags & MSG_EOR) {
5951 			/* Done with this control */
5952 			goto done_with_control;
5953 		}
5954 	}
5955 release:
5956 	if (hold_rlock == 1) {
5957 		SCTP_INP_READ_UNLOCK(inp);
5958 		hold_rlock = 0;
5959 	}
5960 	if (hold_sblock == 1) {
5961 		SOCKBUF_UNLOCK(&so->so_rcv);
5962 		hold_sblock = 0;
5963 	}
5964 	sbunlock(&so->so_rcv);
5965 	sockbuf_lock = 0;
5966 
5967 release_unlocked:
5968 	if (hold_sblock) {
5969 		SOCKBUF_UNLOCK(&so->so_rcv);
5970 		hold_sblock = 0;
5971 	}
5972 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5973 		if ((freed_so_far >= rwnd_req) &&
5974 		    (control && (control->do_not_ref_stcb == 0)) &&
5975 		    (no_rcv_needed == 0))
5976 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5977 	}
5978 out:
5979 	if (msg_flags) {
5980 		*msg_flags = out_flags;
5981 	}
5982 	if (((out_flags & MSG_EOR) == 0) &&
5983 	    ((in_flags & MSG_PEEK) == 0) &&
5984 	    (sinfo) &&
5985 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
5986 		struct sctp_extrcvinfo *s_extra;
5987 
5988 		s_extra = (struct sctp_extrcvinfo *)sinfo;
5989 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5990 	}
5991 	if (hold_rlock == 1) {
5992 		SCTP_INP_READ_UNLOCK(inp);
5993 		hold_rlock = 0;
5994 	}
5995 	if (hold_sblock) {
5996 		SOCKBUF_UNLOCK(&so->so_rcv);
5997 		hold_sblock = 0;
5998 	}
5999 	if (sockbuf_lock) {
6000 		sbunlock(&so->so_rcv);
6001 	}
6002 	if (freecnt_applied) {
6003 		/*
6004 		 * The lock on the socket buffer protects us so the free
6005 		 * code will stop. But since we used the socketbuf lock and
6006 		 * the sender uses the tcb_lock to increment, we need to use
6007 		 * the atomic add to the refcnt.
6008 		 */
6009 		if (stcb == NULL) {
6010 #ifdef INVARIANTS
6011 			panic("stcb for refcnt has gone NULL?");
6012 			goto stage_left;
6013 #else
6014 			goto stage_left;
6015 #endif
6016 		}
6017 		atomic_add_int(&stcb->asoc.refcnt, -1);
6018 		freecnt_applied = 0;
6019 		/* Save the value back for next time */
6020 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6021 	}
6022 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6023 		if (stcb) {
6024 			sctp_misc_ints(SCTP_SORECV_DONE,
6025 			    freed_so_far,
6026 			    ((uio) ? (slen - uio->uio_resid) : slen),
6027 			    stcb->asoc.my_rwnd,
6028 			    so->so_rcv.sb_cc);
6029 		} else {
6030 			sctp_misc_ints(SCTP_SORECV_DONE,
6031 			    freed_so_far,
6032 			    ((uio) ? (slen - uio->uio_resid) : slen),
6033 			    0,
6034 			    so->so_rcv.sb_cc);
6035 		}
6036 	}
6037 stage_left:
6038 	if (wakeup_read_socket) {
6039 		sctp_sorwakeup(inp, so);
6040 	}
6041 	return (error);
6042 }
6043 
6044 
6045 #ifdef SCTP_MBUF_LOGGING
6046 struct mbuf *
6047 sctp_m_free(struct mbuf *m)
6048 {
6049 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6050 		if (SCTP_BUF_IS_EXTENDED(m)) {
6051 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6052 		}
6053 	}
6054 	return (m_free(m));
6055 }
6056 
6057 void
6058 sctp_m_freem(struct mbuf *mb)
6059 {
6060 	while (mb != NULL)
6061 		mb = sctp_m_free(mb);
6062 }
6063 
6064 #endif
6065 
6066 int
6067 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6068 {
6069 	/*
6070 	 * Given a local address. For all associations that holds the
6071 	 * address, request a peer-set-primary.
6072 	 */
6073 	struct sctp_ifa *ifa;
6074 	struct sctp_laddr *wi;
6075 
6076 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6077 	if (ifa == NULL) {
6078 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6079 		return (EADDRNOTAVAIL);
6080 	}
6081 	/*
6082 	 * Now that we have the ifa we must awaken the iterator with this
6083 	 * message.
6084 	 */
6085 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6086 	if (wi == NULL) {
6087 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6088 		return (ENOMEM);
6089 	}
6090 	/* Now incr the count and int wi structure */
6091 	SCTP_INCR_LADDR_COUNT();
6092 	bzero(wi, sizeof(*wi));
6093 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6094 	wi->ifa = ifa;
6095 	wi->action = SCTP_SET_PRIM_ADDR;
6096 	atomic_add_int(&ifa->refcount, 1);
6097 
6098 	/* Now add it to the work queue */
6099 	SCTP_IPI_ITERATOR_WQ_LOCK();
6100 	/*
6101 	 * Should this really be a tailq? As it is we will process the
6102 	 * newest first :-0
6103 	 */
6104 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6105 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6106 	    (struct sctp_inpcb *)NULL,
6107 	    (struct sctp_tcb *)NULL,
6108 	    (struct sctp_nets *)NULL);
6109 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
6110 	return (0);
6111 }
6112 
6113 
6114 int
6115 sctp_soreceive(struct socket *so,
6116     struct sockaddr **psa,
6117     struct uio *uio,
6118     struct mbuf **mp0,
6119     struct mbuf **controlp,
6120     int *flagsp)
6121 {
6122 	int error, fromlen;
6123 	uint8_t sockbuf[256];
6124 	struct sockaddr *from;
6125 	struct sctp_extrcvinfo sinfo;
6126 	int filling_sinfo = 1;
6127 	struct sctp_inpcb *inp;
6128 
6129 	inp = (struct sctp_inpcb *)so->so_pcb;
6130 	/* pickup the assoc we are reading from */
6131 	if (inp == NULL) {
6132 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6133 		return (EINVAL);
6134 	}
6135 	if ((sctp_is_feature_off(inp,
6136 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6137 	    (controlp == NULL)) {
6138 		/* user does not want the sndrcv ctl */
6139 		filling_sinfo = 0;
6140 	}
6141 	if (psa) {
6142 		from = (struct sockaddr *)sockbuf;
6143 		fromlen = sizeof(sockbuf);
6144 		from->sa_len = 0;
6145 	} else {
6146 		from = NULL;
6147 		fromlen = 0;
6148 	}
6149 
6150 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6151 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6152 	if ((controlp) && (filling_sinfo)) {
6153 		/* copy back the sinfo in a CMSG format */
6154 		if (filling_sinfo)
6155 			*controlp = sctp_build_ctl_nchunk(inp,
6156 			    (struct sctp_sndrcvinfo *)&sinfo);
6157 		else
6158 			*controlp = NULL;
6159 	}
6160 	if (psa) {
6161 		/* copy back the address info */
6162 		if (from && from->sa_len) {
6163 			*psa = sodupsockaddr(from, M_NOWAIT);
6164 		} else {
6165 			*psa = NULL;
6166 		}
6167 	}
6168 	return (error);
6169 }
6170 
6171 
6172 int
6173 sctp_l_soreceive(struct socket *so,
6174     struct sockaddr **name,
6175     struct uio *uio,
6176     char **controlp,
6177     int *controllen,
6178     int *flag)
6179 {
6180 	int error, fromlen;
6181 	uint8_t sockbuf[256];
6182 	struct sockaddr *from;
6183 	struct sctp_extrcvinfo sinfo;
6184 	int filling_sinfo = 1;
6185 	struct sctp_inpcb *inp;
6186 
6187 	inp = (struct sctp_inpcb *)so->so_pcb;
6188 	/* pickup the assoc we are reading from */
6189 	if (inp == NULL) {
6190 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6191 		return (EINVAL);
6192 	}
6193 	if ((sctp_is_feature_off(inp,
6194 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6195 	    (controlp == NULL)) {
6196 		/* user does not want the sndrcv ctl */
6197 		filling_sinfo = 0;
6198 	}
6199 	if (name) {
6200 		from = (struct sockaddr *)sockbuf;
6201 		fromlen = sizeof(sockbuf);
6202 		from->sa_len = 0;
6203 	} else {
6204 		from = NULL;
6205 		fromlen = 0;
6206 	}
6207 
6208 	error = sctp_sorecvmsg(so, uio,
6209 	    (struct mbuf **)NULL,
6210 	    from, fromlen, flag,
6211 	    (struct sctp_sndrcvinfo *)&sinfo,
6212 	    filling_sinfo);
6213 	if ((controlp) && (filling_sinfo)) {
6214 		/*
6215 		 * copy back the sinfo in a CMSG format note that the caller
6216 		 * has reponsibility for freeing the memory.
6217 		 */
6218 		if (filling_sinfo)
6219 			*controlp = sctp_build_ctl_cchunk(inp,
6220 			    controllen,
6221 			    (struct sctp_sndrcvinfo *)&sinfo);
6222 	}
6223 	if (name) {
6224 		/* copy back the address info */
6225 		if (from && from->sa_len) {
6226 			*name = sodupsockaddr(from, M_WAIT);
6227 		} else {
6228 			*name = NULL;
6229 		}
6230 	}
6231 	return (error);
6232 }
6233 
6234 
6235 
6236 
6237 
6238 
6239 
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add up to totaddr remote addresses (packed back-to-back in
	 * addr) to the given association.  Returns the number of
	 * addresses actually added.  On failure *error is set to ENOBUFS
	 * and the association has already been destroyed via
	 * sctp_free_assoc() -- callers must not touch stcb afterwards.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/*
		 * NOTE(review): for an unrecognized sa_family, incr keeps
		 * its previous value (0 on the first iteration), so the
		 * same bytes would be re-examined.  Callers are presumably
		 * expected to have validated the list first (e.g. via
		 * sctp_connectx_helper_find) -- confirm.
		 */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6280 
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    int *totaddr, int *num_v4, int *num_v6, int *error,
    int limit, int *bad_addr)
{
	/*
	 * Walk the packed address list handed to sctp_connectx() and
	 * validate/count each entry.  On return *num_v4/*num_v6 hold the
	 * per-family counts, and *totaddr may be trimmed to the number of
	 * entries actually examined.  If any address already maps to an
	 * association of this endpoint, that association is returned with
	 * the inp reference (taken below) still held -- ownership passes
	 * to the caller.  Otherwise NULL is returned.  A malformed
	 * address sets *error = EINVAL and *bad_addr = 1.
	 */
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	size_t incr, at, i;

	at = incr = 0;
	sa = addr;
	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < (size_t)*totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			(*num_v4) += 1;
			incr = sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				/* malformed sockaddr_in */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
		} else if (sa->sa_family == AF_INET6) {
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
				/* Must be non-mapped for connectx */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			(*num_v6) += 1;
			incr = sizeof(struct sockaddr_in6);
			if (sa->sa_len != incr) {
				/* malformed sockaddr_in6 */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
		} else {
			/* unknown family: stop and report how far we got */
			*totaddr = i;
			/* we are done */
			break;
		}
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* Already have or am bring up an association */
			return (stcb);
		} else {
			/* no match; drop the ref taken for the lookup */
			SCTP_INP_DECR_REF(inp);
		}
		/*
		 * NOTE(review): 'at' is never advanced in this loop, so
		 * this bound compares only a single entry size against
		 * limit rather than the cumulative offset -- confirm
		 * whether that is intended.
		 */
		if ((at + incr) > (size_t)limit) {
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}
6344 
6345 /*
6346  * sctp_bindx(ADD) for one address.
6347  * assumes all arguments are valid/checked by caller.
6348  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * Bind one additional local address to the endpoint
	 * (sctp_bindx ADD).  On failure *error is set to an errno
	 * value; rejected cases include a BOUNDALL endpoint, malformed
	 * sockaddr lengths, and family/v6only mismatches.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	/* temporary v4 form of a v4-mapped v6 address */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			/* malformed sockaddr_in6 */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* fold the v4-mapped address into a real sockaddr_in */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			/* malformed sockaddr_in */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* endpoint not yet bound: do a regular bind instead */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* is the address/port pair already owned by some endpoint? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* address is free: add it via address management */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* another endpoint owns this address/port */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6470 
6471 /*
6472  * sctp_bindx(DELETE) for one address.
6473  * assumes all arguments are valid/checked by caller.
6474  */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * Remove one local address from the endpoint (sctp_bindx DELETE).
	 * Validation mirrors sctp_bindx_add_address(); on failure *error
	 * is set to an errno value.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	/* temporary v4 form of a v4-mapped v6 address */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			/* malformed sockaddr_in6 */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* fold the v4-mapped address into a real sockaddr_in */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			/* malformed sockaddr_in */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6553 
6554 /*
6555  * returns the valid local address count for an assoc, taking into account
6556  * all scoping rules
6557  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Return the number of valid local addresses for the given
	 * association, applying all scoping rules (loopback, IPv4
	 * private, link-local, site-local) and the endpoint's address
	 * family restrictions.
	 */
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		/* a non-v6only PF_INET6 socket can also use v4 addresses */
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad
									 * link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6689 
6690 #if defined(SCTP_LOCAL_TRACE_BUF)
6691 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Append an entry to the global circular trace buffer.  A slot
	 * index is claimed lock-free with a CAS loop, so concurrent
	 * tracers each obtain a distinct slot; the entry fields
	 * themselves are filled in without further synchronization, so a
	 * slow writer can be overwritten once the buffer wraps.
	 */
	uint32_t saveindex, newindex;

	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			/* wrap the index back to the start of the buffer */
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		/* a wrapped claim writes into slot 0 */
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6717 
6718 #endif
/* Support for binding the tunneling ports and receiving
 * tunneled packets lives below, so that we can do
 * SCTP-over-UDP encapsulation.  Unsupported cases
 * (e.g. IPv6) return an error.
 */
6724 #include <netinet/udp.h>
6725 #include <netinet/udp_var.h>
6726 #include <sys/proc.h>
6727 #ifdef INET6
6728 #include <netinet6/sctp6_var.h>
6729 #endif
6730 
6731 static void
6732 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6733 {
6734 	struct ip *iph;
6735 	struct mbuf *sp, *last;
6736 	struct udphdr *uhdr;
6737 	uint16_t port = 0, len;
6738 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6739 
6740 	/*
6741 	 * Split out the mbuf chain. Leave the IP header in m, place the
6742 	 * rest in the sp.
6743 	 */
6744 	if ((m->m_flags & M_PKTHDR) == 0) {
6745 		/* Can't handle one that is not a pkt hdr */
6746 		goto out;
6747 	}
6748 	/* pull the src port */
6749 	iph = mtod(m, struct ip *);
6750 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6751 
6752 	port = uhdr->uh_sport;
6753 	sp = m_split(m, off, M_DONTWAIT);
6754 	if (sp == NULL) {
6755 		/* Gak, drop packet, we can't do a split */
6756 		goto out;
6757 	}
6758 	if (sp->m_pkthdr.len < header_size) {
6759 		/* Gak, packet can't have an SCTP header in it - to small */
6760 		m_freem(sp);
6761 		goto out;
6762 	}
6763 	/* ok now pull up the UDP header and SCTP header together */
6764 	sp = m_pullup(sp, header_size);
6765 	if (sp == NULL) {
6766 		/* Gak pullup failed */
6767 		goto out;
6768 	}
6769 	/* trim out the UDP header */
6770 	m_adj(sp, sizeof(struct udphdr));
6771 
6772 	/* Now reconstruct the mbuf chain */
6773 	/* 1) find last one */
6774 	last = m;
6775 	while (last->m_next != NULL) {
6776 		last = last->m_next;
6777 	}
6778 	last->m_next = sp;
6779 	m->m_pkthdr.len += sp->m_pkthdr.len;
6780 	last = m;
6781 	while (last != NULL) {
6782 		last = last->m_next;
6783 	}
6784 	/* Now its ready for sctp_input or sctp6_input */
6785 	iph = mtod(m, struct ip *);
6786 	switch (iph->ip_v) {
6787 	case IPVERSION:
6788 		{
6789 			/* its IPv4 */
6790 			len = SCTP_GET_IPV4_LENGTH(iph);
6791 			len -= sizeof(struct udphdr);
6792 			SCTP_GET_IPV4_LENGTH(iph) = len;
6793 			sctp_input_with_port(m, off, port);
6794 			break;
6795 		}
6796 #ifdef INET6
6797 	case IPV6_VERSION >> 4:
6798 		{
6799 			/* its IPv6 - NOT supported */
6800 			goto out;
6801 			break;
6802 
6803 		}
6804 #endif
6805 	default:
6806 		{
6807 			m_freem(m);
6808 			break;
6809 		}
6810 	}
6811 	return;
6812 out:
6813 	m_freem(m);
6814 }
6815 
6816 void
6817 sctp_over_udp_stop(void)
6818 {
6819 	struct socket *sop;
6820 
6821 	/*
6822 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6823 	 * for writting!
6824 	 */
6825 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6826 		/* Nothing to do */
6827 		return;
6828 	}
6829 	sop = SCTP_BASE_INFO(udp_tun_socket);
6830 	soclose(sop);
6831 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6832 }
int
sctp_over_udp_start(void)
{
	/*
	 * Create and bind the kernel UDP socket used for SCTP-over-UDP
	 * tunneling, and install sctp_recv_udp_tunneled_packet() as its
	 * input hook.  Returns 0 on success or an errno value; on any
	 * failure after socket creation the socket is torn down again via
	 * sctp_over_udp_stop().
	 */
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up, we can't get the port */
exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_udp_tunneled_packet().
	 */
	return (0);
}
6886