xref: /freebsd/sys/netinet/sctputil.c (revision 5861f9665471e98e544f6fa3ce73c4912229ff82)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_cc_functions.h>
51 
52 #define NUMBER_OF_MTU_SIZES 18
53 
54 
55 #if defined(__Windows__) && !defined(SCTP_LOCAL_TRACE_BUF)
56 #include "eventrace_netinet.h"
57 #include "sctputil.tmh"		/* this is the file that will be auto
58 				 * generated */
59 #else
60 #ifndef KTR_SCTP
61 #define KTR_SCTP KTR_SUBSYS
62 #endif
63 #endif
64 
65 void
66 sctp_sblog(struct sockbuf *sb,
67     struct sctp_tcb *stcb, int from, int incr)
68 {
69 	struct sctp_cwnd_log sctp_clog;
70 
71 	sctp_clog.x.sb.stcb = stcb;
72 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
73 	if (stcb)
74 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
75 	else
76 		sctp_clog.x.sb.stcb_sbcc = 0;
77 	sctp_clog.x.sb.incr = incr;
78 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
79 	    SCTP_LOG_EVENT_SB,
80 	    from,
81 	    sctp_clog.x.misc.log1,
82 	    sctp_clog.x.misc.log2,
83 	    sctp_clog.x.misc.log3,
84 	    sctp_clog.x.misc.log4);
85 }
86 
87 void
88 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
89 {
90 	struct sctp_cwnd_log sctp_clog;
91 
92 	sctp_clog.x.close.inp = (void *)inp;
93 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
94 	if (stcb) {
95 		sctp_clog.x.close.stcb = (void *)stcb;
96 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
97 	} else {
98 		sctp_clog.x.close.stcb = 0;
99 		sctp_clog.x.close.state = 0;
100 	}
101 	sctp_clog.x.close.loc = loc;
102 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
103 	    SCTP_LOG_EVENT_CLOSE,
104 	    0,
105 	    sctp_clog.x.misc.log1,
106 	    sctp_clog.x.misc.log2,
107 	    sctp_clog.x.misc.log3,
108 	    sctp_clog.x.misc.log4);
109 }
110 
111 
112 void
113 rto_logging(struct sctp_nets *net, int from)
114 {
115 	struct sctp_cwnd_log sctp_clog;
116 
117 	memset(&sctp_clog, 0, sizeof(sctp_clog));
118 	sctp_clog.x.rto.net = (void *)net;
119 	sctp_clog.x.rto.rtt = net->prev_rtt;
120 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
121 	    SCTP_LOG_EVENT_RTT,
122 	    from,
123 	    sctp_clog.x.misc.log1,
124 	    sctp_clog.x.misc.log2,
125 	    sctp_clog.x.misc.log3,
126 	    sctp_clog.x.misc.log4);
127 
128 }
129 
130 void
131 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
132 {
133 	struct sctp_cwnd_log sctp_clog;
134 
135 	sctp_clog.x.strlog.stcb = stcb;
136 	sctp_clog.x.strlog.n_tsn = tsn;
137 	sctp_clog.x.strlog.n_sseq = sseq;
138 	sctp_clog.x.strlog.e_tsn = 0;
139 	sctp_clog.x.strlog.e_sseq = 0;
140 	sctp_clog.x.strlog.strm = stream;
141 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
142 	    SCTP_LOG_EVENT_STRM,
143 	    from,
144 	    sctp_clog.x.misc.log1,
145 	    sctp_clog.x.misc.log2,
146 	    sctp_clog.x.misc.log3,
147 	    sctp_clog.x.misc.log4);
148 
149 }
150 
151 void
152 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
153 {
154 	struct sctp_cwnd_log sctp_clog;
155 
156 	sctp_clog.x.nagle.stcb = (void *)stcb;
157 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
158 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
159 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
160 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
161 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
162 	    SCTP_LOG_EVENT_NAGLE,
163 	    action,
164 	    sctp_clog.x.misc.log1,
165 	    sctp_clog.x.misc.log2,
166 	    sctp_clog.x.misc.log3,
167 	    sctp_clog.x.misc.log4);
168 }
169 
170 
171 void
172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
173 {
174 	struct sctp_cwnd_log sctp_clog;
175 
176 	sctp_clog.x.sack.cumack = cumack;
177 	sctp_clog.x.sack.oldcumack = old_cumack;
178 	sctp_clog.x.sack.tsn = tsn;
179 	sctp_clog.x.sack.numGaps = gaps;
180 	sctp_clog.x.sack.numDups = dups;
181 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
182 	    SCTP_LOG_EVENT_SACK,
183 	    from,
184 	    sctp_clog.x.misc.log1,
185 	    sctp_clog.x.misc.log2,
186 	    sctp_clog.x.misc.log3,
187 	    sctp_clog.x.misc.log4);
188 }
189 
190 void
191 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
192 {
193 	struct sctp_cwnd_log sctp_clog;
194 
195 	memset(&sctp_clog, 0, sizeof(sctp_clog));
196 	sctp_clog.x.map.base = map;
197 	sctp_clog.x.map.cum = cum;
198 	sctp_clog.x.map.high = high;
199 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
200 	    SCTP_LOG_EVENT_MAP,
201 	    from,
202 	    sctp_clog.x.misc.log1,
203 	    sctp_clog.x.misc.log2,
204 	    sctp_clog.x.misc.log3,
205 	    sctp_clog.x.misc.log4);
206 }
207 
208 void
209 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
210     int from)
211 {
212 	struct sctp_cwnd_log sctp_clog;
213 
214 	memset(&sctp_clog, 0, sizeof(sctp_clog));
215 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
216 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
217 	sctp_clog.x.fr.tsn = tsn;
218 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
219 	    SCTP_LOG_EVENT_FR,
220 	    from,
221 	    sctp_clog.x.misc.log1,
222 	    sctp_clog.x.misc.log2,
223 	    sctp_clog.x.misc.log3,
224 	    sctp_clog.x.misc.log4);
225 
226 }
227 
228 
229 void
230 sctp_log_mb(struct mbuf *m, int from)
231 {
232 	struct sctp_cwnd_log sctp_clog;
233 
234 	sctp_clog.x.mb.mp = m;
235 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
236 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
237 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
238 	if (SCTP_BUF_IS_EXTENDED(m)) {
239 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
240 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
241 	} else {
242 		sctp_clog.x.mb.ext = 0;
243 		sctp_clog.x.mb.refcnt = 0;
244 	}
245 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
246 	    SCTP_LOG_EVENT_MBUF,
247 	    from,
248 	    sctp_clog.x.misc.log1,
249 	    sctp_clog.x.misc.log2,
250 	    sctp_clog.x.misc.log3,
251 	    sctp_clog.x.misc.log4);
252 }
253 
254 
255 void
256 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
257     int from)
258 {
259 	struct sctp_cwnd_log sctp_clog;
260 
261 	if (control == NULL) {
262 		SCTP_PRINTF("Gak log of NULL?\n");
263 		return;
264 	}
265 	sctp_clog.x.strlog.stcb = control->stcb;
266 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
267 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
268 	sctp_clog.x.strlog.strm = control->sinfo_stream;
269 	if (poschk != NULL) {
270 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
271 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
272 	} else {
273 		sctp_clog.x.strlog.e_tsn = 0;
274 		sctp_clog.x.strlog.e_sseq = 0;
275 	}
276 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
277 	    SCTP_LOG_EVENT_STRM,
278 	    from,
279 	    sctp_clog.x.misc.log1,
280 	    sctp_clog.x.misc.log2,
281 	    sctp_clog.x.misc.log3,
282 	    sctp_clog.x.misc.log4);
283 
284 }
285 
286 void
287 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
288 {
289 	struct sctp_cwnd_log sctp_clog;
290 
291 	sctp_clog.x.cwnd.net = net;
292 	if (stcb->asoc.send_queue_cnt > 255)
293 		sctp_clog.x.cwnd.cnt_in_send = 255;
294 	else
295 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
296 	if (stcb->asoc.stream_queue_cnt > 255)
297 		sctp_clog.x.cwnd.cnt_in_str = 255;
298 	else
299 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
300 
301 	if (net) {
302 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
303 		sctp_clog.x.cwnd.inflight = net->flight_size;
304 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
305 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
306 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
307 	}
308 	if (SCTP_CWNDLOG_PRESEND == from) {
309 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
310 	}
311 	sctp_clog.x.cwnd.cwnd_augment = augment;
312 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
313 	    SCTP_LOG_EVENT_CWND,
314 	    from,
315 	    sctp_clog.x.misc.log1,
316 	    sctp_clog.x.misc.log2,
317 	    sctp_clog.x.misc.log3,
318 	    sctp_clog.x.misc.log4);
319 
320 }
321 
322 void
323 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
324 {
325 	struct sctp_cwnd_log sctp_clog;
326 
327 	memset(&sctp_clog, 0, sizeof(sctp_clog));
328 	if (inp) {
329 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
330 
331 	} else {
332 		sctp_clog.x.lock.sock = (void *)NULL;
333 	}
334 	sctp_clog.x.lock.inp = (void *)inp;
335 	if (stcb) {
336 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
337 	} else {
338 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
339 	}
340 	if (inp) {
341 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
342 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
343 	} else {
344 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
345 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
346 	}
347 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
348 	if (inp->sctp_socket) {
349 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
350 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
351 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
352 	} else {
353 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
354 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
355 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
356 	}
357 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
358 	    SCTP_LOG_LOCK_EVENT,
359 	    from,
360 	    sctp_clog.x.misc.log1,
361 	    sctp_clog.x.misc.log2,
362 	    sctp_clog.x.misc.log3,
363 	    sctp_clog.x.misc.log4);
364 
365 }
366 
367 void
368 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
369 {
370 	struct sctp_cwnd_log sctp_clog;
371 
372 	memset(&sctp_clog, 0, sizeof(sctp_clog));
373 	sctp_clog.x.cwnd.net = net;
374 	sctp_clog.x.cwnd.cwnd_new_value = error;
375 	sctp_clog.x.cwnd.inflight = net->flight_size;
376 	sctp_clog.x.cwnd.cwnd_augment = burst;
377 	if (stcb->asoc.send_queue_cnt > 255)
378 		sctp_clog.x.cwnd.cnt_in_send = 255;
379 	else
380 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
381 	if (stcb->asoc.stream_queue_cnt > 255)
382 		sctp_clog.x.cwnd.cnt_in_str = 255;
383 	else
384 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
385 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
386 	    SCTP_LOG_EVENT_MAXBURST,
387 	    from,
388 	    sctp_clog.x.misc.log1,
389 	    sctp_clog.x.misc.log2,
390 	    sctp_clog.x.misc.log3,
391 	    sctp_clog.x.misc.log4);
392 
393 }
394 
395 void
396 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
397 {
398 	struct sctp_cwnd_log sctp_clog;
399 
400 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
401 	sctp_clog.x.rwnd.send_size = snd_size;
402 	sctp_clog.x.rwnd.overhead = overhead;
403 	sctp_clog.x.rwnd.new_rwnd = 0;
404 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
405 	    SCTP_LOG_EVENT_RWND,
406 	    from,
407 	    sctp_clog.x.misc.log1,
408 	    sctp_clog.x.misc.log2,
409 	    sctp_clog.x.misc.log3,
410 	    sctp_clog.x.misc.log4);
411 }
412 
413 void
414 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
415 {
416 	struct sctp_cwnd_log sctp_clog;
417 
418 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
419 	sctp_clog.x.rwnd.send_size = flight_size;
420 	sctp_clog.x.rwnd.overhead = overhead;
421 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
422 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
423 	    SCTP_LOG_EVENT_RWND,
424 	    from,
425 	    sctp_clog.x.misc.log1,
426 	    sctp_clog.x.misc.log2,
427 	    sctp_clog.x.misc.log3,
428 	    sctp_clog.x.misc.log4);
429 }
430 
431 void
432 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
433 {
434 	struct sctp_cwnd_log sctp_clog;
435 
436 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
437 	sctp_clog.x.mbcnt.size_change = book;
438 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
439 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
440 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
441 	    SCTP_LOG_EVENT_MBCNT,
442 	    from,
443 	    sctp_clog.x.misc.log1,
444 	    sctp_clog.x.misc.log2,
445 	    sctp_clog.x.misc.log3,
446 	    sctp_clog.x.misc.log4);
447 
448 }
449 
450 void
451 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
452 {
453 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
454 	    SCTP_LOG_MISC_EVENT,
455 	    from,
456 	    a, b, c, d);
457 }
458 
459 void
460 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
461 {
462 	struct sctp_cwnd_log sctp_clog;
463 
464 	sctp_clog.x.wake.stcb = (void *)stcb;
465 	sctp_clog.x.wake.wake_cnt = wake_cnt;
466 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469 
470 	if (stcb->asoc.stream_queue_cnt < 0xff)
471 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472 	else
473 		sctp_clog.x.wake.stream_qcnt = 0xff;
474 
475 	if (stcb->asoc.chunks_on_out_queue < 0xff)
476 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477 	else
478 		sctp_clog.x.wake.chunks_on_oque = 0xff;
479 
480 	sctp_clog.x.wake.sctpflags = 0;
481 	/* set in the defered mode stuff */
482 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483 		sctp_clog.x.wake.sctpflags |= 1;
484 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485 		sctp_clog.x.wake.sctpflags |= 2;
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487 		sctp_clog.x.wake.sctpflags |= 4;
488 	/* what about the sb */
489 	if (stcb->sctp_socket) {
490 		struct socket *so = stcb->sctp_socket;
491 
492 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493 	} else {
494 		sctp_clog.x.wake.sbflags = 0xff;
495 	}
496 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497 	    SCTP_LOG_EVENT_WAKE,
498 	    from,
499 	    sctp_clog.x.misc.log1,
500 	    sctp_clog.x.misc.log2,
501 	    sctp_clog.x.misc.log3,
502 	    sctp_clog.x.misc.log4);
503 
504 }
505 
506 void
507 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
508 {
509 	struct sctp_cwnd_log sctp_clog;
510 
511 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
512 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
513 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
514 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
515 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
516 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
517 	sctp_clog.x.blk.sndlen = sendlen;
518 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
519 	    SCTP_LOG_EVENT_BLOCK,
520 	    from,
521 	    sctp_clog.x.misc.log1,
522 	    sctp_clog.x.misc.log2,
523 	    sctp_clog.x.misc.log3,
524 	    sctp_clog.x.misc.log4);
525 
526 }
527 
/*
 * Placeholder: stat-log extraction is expected to happen externally
 * (e.g. via ktrdump), so this always succeeds without touching the
 * caller's buffer.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	return (0);
}
534 
535 #ifdef SCTP_AUDITING_ENABLED
/* Circular audit trail; each entry is a two-byte (event, detail) record. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next write position in sctp_audit_data; wraps back to 0 at the end. */
static int sctp_audit_indx = 0;
538 
539 static
540 void
541 sctp_print_audit_report(void)
542 {
543 	int i;
544 	int cnt;
545 
546 	cnt = 0;
547 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
548 		if ((sctp_audit_data[i][0] == 0xe0) &&
549 		    (sctp_audit_data[i][1] == 0x01)) {
550 			cnt = 0;
551 			SCTP_PRINTF("\n");
552 		} else if (sctp_audit_data[i][0] == 0xf0) {
553 			cnt = 0;
554 			SCTP_PRINTF("\n");
555 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
556 		    (sctp_audit_data[i][1] == 0x01)) {
557 			SCTP_PRINTF("\n");
558 			cnt = 0;
559 		}
560 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
561 		    (uint32_t) sctp_audit_data[i][1]);
562 		cnt++;
563 		if ((cnt % 14) == 0)
564 			SCTP_PRINTF("\n");
565 	}
566 	for (i = 0; i < sctp_audit_indx; i++) {
567 		if ((sctp_audit_data[i][0] == 0xe0) &&
568 		    (sctp_audit_data[i][1] == 0x01)) {
569 			cnt = 0;
570 			SCTP_PRINTF("\n");
571 		} else if (sctp_audit_data[i][0] == 0xf0) {
572 			cnt = 0;
573 			SCTP_PRINTF("\n");
574 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
575 		    (sctp_audit_data[i][1] == 0x01)) {
576 			SCTP_PRINTF("\n");
577 			cnt = 0;
578 		}
579 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
580 		    (uint32_t) sctp_audit_data[i][1]);
581 		cnt++;
582 		if ((cnt % 14) == 0)
583 			SCTP_PRINTF("\n");
584 	}
585 	SCTP_PRINTF("\n");
586 }
587 
588 void
589 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
590     struct sctp_nets *net)
591 {
592 	int resend_cnt, tot_out, rep, tot_book_cnt;
593 	struct sctp_nets *lnet;
594 	struct sctp_tmit_chunk *chk;
595 
596 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
597 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
598 	sctp_audit_indx++;
599 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
600 		sctp_audit_indx = 0;
601 	}
602 	if (inp == NULL) {
603 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
604 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
605 		sctp_audit_indx++;
606 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
607 			sctp_audit_indx = 0;
608 		}
609 		return;
610 	}
611 	if (stcb == NULL) {
612 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
613 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
614 		sctp_audit_indx++;
615 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
616 			sctp_audit_indx = 0;
617 		}
618 		return;
619 	}
620 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
621 	sctp_audit_data[sctp_audit_indx][1] =
622 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
623 	sctp_audit_indx++;
624 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
625 		sctp_audit_indx = 0;
626 	}
627 	rep = 0;
628 	tot_book_cnt = 0;
629 	resend_cnt = tot_out = 0;
630 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
631 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
632 			resend_cnt++;
633 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
634 			tot_out += chk->book_size;
635 			tot_book_cnt++;
636 		}
637 	}
638 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
639 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
640 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
641 		sctp_audit_indx++;
642 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
643 			sctp_audit_indx = 0;
644 		}
645 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
646 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
647 		rep = 1;
648 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
649 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
650 		sctp_audit_data[sctp_audit_indx][1] =
651 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
652 		sctp_audit_indx++;
653 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
654 			sctp_audit_indx = 0;
655 		}
656 	}
657 	if (tot_out != stcb->asoc.total_flight) {
658 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
659 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
660 		sctp_audit_indx++;
661 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
662 			sctp_audit_indx = 0;
663 		}
664 		rep = 1;
665 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
666 		    (int)stcb->asoc.total_flight);
667 		stcb->asoc.total_flight = tot_out;
668 	}
669 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
670 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
671 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
672 		sctp_audit_indx++;
673 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
674 			sctp_audit_indx = 0;
675 		}
676 		rep = 1;
677 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
678 
679 		stcb->asoc.total_flight_count = tot_book_cnt;
680 	}
681 	tot_out = 0;
682 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
683 		tot_out += lnet->flight_size;
684 	}
685 	if (tot_out != stcb->asoc.total_flight) {
686 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
687 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
688 		sctp_audit_indx++;
689 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
690 			sctp_audit_indx = 0;
691 		}
692 		rep = 1;
693 		SCTP_PRINTF("real flight:%d net total was %d\n",
694 		    stcb->asoc.total_flight, tot_out);
695 		/* now corrective action */
696 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
697 
698 			tot_out = 0;
699 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
700 				if ((chk->whoTo == lnet) &&
701 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
702 					tot_out += chk->book_size;
703 				}
704 			}
705 			if (lnet->flight_size != tot_out) {
706 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
707 				    (uint32_t) lnet, lnet->flight_size,
708 				    tot_out);
709 				lnet->flight_size = tot_out;
710 			}
711 		}
712 	}
713 	if (rep) {
714 		sctp_print_audit_report();
715 	}
716 }
717 
718 void
719 sctp_audit_log(uint8_t ev, uint8_t fd)
720 {
721 
722 	sctp_audit_data[sctp_audit_indx][0] = ev;
723 	sctp_audit_data[sctp_audit_indx][1] = fd;
724 	sctp_audit_indx++;
725 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
726 		sctp_audit_indx = 0;
727 	}
728 }
729 
730 #endif
731 
732 /*
733  * a list of sizes based on typical mtu's, used only if next hop size not
734  * returned.
735  */
/*
 * Ascending table of typical link MTUs; entry count must stay in sync
 * with NUMBER_OF_MTU_SIZES (currently 18).
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
756 
757 void
758 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
759 {
760 	struct sctp_association *asoc;
761 	struct sctp_nets *net;
762 
763 	asoc = &stcb->asoc;
764 
765 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
766 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
767 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
768 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
769 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
770 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
771 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
772 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
773 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
774 	}
775 }
776 
777 int
778 find_next_best_mtu(int totsz)
779 {
780 	int i, perfer;
781 
782 	/*
783 	 * if we are in here we must find the next best fit based on the
784 	 * size of the dg that failed to be sent.
785 	 */
786 	perfer = 0;
787 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
788 		if (totsz < sctp_mtu_sizes[i]) {
789 			perfer = i - 1;
790 			if (perfer < 0)
791 				perfer = 0;
792 			break;
793 		}
794 	}
795 	return (sctp_mtu_sizes[perfer]);
796 }
797 
798 void
799 sctp_fill_random_store(struct sctp_pcb *m)
800 {
801 	/*
802 	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
803 	 * our counter. The result becomes our good random numbers and we
804 	 * then setup to give these out. Note that we do no locking to
805 	 * protect this. This is ok, since if competing folks call this we
806 	 * will get more gobbled gook in the random store which is what we
807 	 * want. There is a danger that two guys will use the same random
808 	 * numbers, but thats ok too since that is random as well :->
809 	 */
810 	m->store_at = 0;
811 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
812 	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
813 	    sizeof(m->random_counter), (uint8_t *) m->random_store);
814 	m->random_counter++;
815 }
816 
817 uint32_t
818 sctp_select_initial_TSN(struct sctp_pcb *inp)
819 {
820 	/*
821 	 * A true implementation should use random selection process to get
822 	 * the initial stream sequence number, using RFC1750 as a good
823 	 * guideline
824 	 */
825 	uint32_t x, *xp;
826 	uint8_t *p;
827 	int store_at, new_store;
828 
829 	if (inp->initial_sequence_debug != 0) {
830 		uint32_t ret;
831 
832 		ret = inp->initial_sequence_debug;
833 		inp->initial_sequence_debug++;
834 		return (ret);
835 	}
836 retry:
837 	store_at = inp->store_at;
838 	new_store = store_at + sizeof(uint32_t);
839 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
840 		new_store = 0;
841 	}
842 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
843 		goto retry;
844 	}
845 	if (new_store == 0) {
846 		/* Refill the random store */
847 		sctp_fill_random_store(inp);
848 	}
849 	p = &inp->random_store[store_at];
850 	xp = (uint32_t *) p;
851 	x = *xp;
852 	return (x);
853 }
854 
855 uint32_t
856 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
857 {
858 	u_long x, not_done;
859 	struct timeval now;
860 
861 	(void)SCTP_GETTIME_TIMEVAL(&now);
862 	not_done = 1;
863 	while (not_done) {
864 		x = sctp_select_initial_TSN(&inp->sctp_ep);
865 		if (x == 0) {
866 			/* we never use 0 */
867 			continue;
868 		}
869 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
870 			not_done = 0;
871 		}
872 	}
873 	return (x);
874 }
875 
876 int
877 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
878     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
879 {
880 	struct sctp_association *asoc;
881 
882 	/*
883 	 * Anything set to zero is taken care of by the allocation routine's
884 	 * bzero
885 	 */
886 
887 	/*
888 	 * Up front select what scoping to apply on addresses I tell my peer
889 	 * Not sure what to do with these right now, we will need to come up
890 	 * with a way to set them. We may need to pass them through from the
891 	 * caller in the sctp_aloc_assoc() function.
892 	 */
893 	int i;
894 
895 	asoc = &stcb->asoc;
896 	/* init all variables to a known value. */
897 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
898 	asoc->max_burst = m->sctp_ep.max_burst;
899 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
900 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
901 	asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
902 	/* EY Init nr_sack variable */
903 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
904 	/* JRS 5/21/07 - Init CMT PF variables */
905 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
906 	asoc->sctp_frag_point = m->sctp_frag_point;
907 #ifdef INET
908 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
909 #else
910 	asoc->default_tos = 0;
911 #endif
912 
913 #ifdef INET6
914 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
915 #else
916 	asoc->default_flowlabel = 0;
917 #endif
918 	asoc->sb_send_resv = 0;
919 	if (override_tag) {
920 		asoc->my_vtag = override_tag;
921 	} else {
922 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
923 	}
924 	/* Get the nonce tags */
925 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
926 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
927 	asoc->vrf_id = vrf_id;
928 
929 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
930 		asoc->hb_is_disabled = 1;
931 	else
932 		asoc->hb_is_disabled = 0;
933 
934 #ifdef SCTP_ASOCLOG_OF_TSNS
935 	asoc->tsn_in_at = 0;
936 	asoc->tsn_out_at = 0;
937 	asoc->tsn_in_wrapped = 0;
938 	asoc->tsn_out_wrapped = 0;
939 	asoc->cumack_log_at = 0;
940 	asoc->cumack_log_atsnt = 0;
941 #endif
942 #ifdef SCTP_FS_SPEC_LOG
943 	asoc->fs_index = 0;
944 #endif
945 	asoc->refcnt = 0;
946 	asoc->assoc_up_sent = 0;
947 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
948 	    sctp_select_initial_TSN(&m->sctp_ep);
949 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
950 	/* we are optimisitic here */
951 	asoc->peer_supports_pktdrop = 1;
952 	asoc->peer_supports_nat = 0;
953 	asoc->sent_queue_retran_cnt = 0;
954 
955 	/* for CMT */
956 	asoc->last_net_cmt_send_started = NULL;
957 
958 	/* This will need to be adjusted */
959 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
960 	asoc->last_acked_seq = asoc->init_seq_number - 1;
961 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
962 	asoc->asconf_seq_in = asoc->last_acked_seq;
963 
964 	/* here we are different, we hold the next one we expect */
965 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
966 
967 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
968 	asoc->initial_rto = m->sctp_ep.initial_rto;
969 
970 	asoc->max_init_times = m->sctp_ep.max_init_times;
971 	asoc->max_send_times = m->sctp_ep.max_send_times;
972 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
973 	asoc->free_chunk_cnt = 0;
974 
975 	asoc->iam_blocking = 0;
976 	/* ECN Nonce initialization */
977 	asoc->context = m->sctp_context;
978 	asoc->def_send = m->def_send;
979 	asoc->ecn_nonce_allowed = 0;
980 	asoc->receiver_nonce_sum = 1;
981 	asoc->nonce_sum_expect_base = 1;
982 	asoc->nonce_sum_check = 1;
983 	asoc->nonce_resync_tsn = 0;
984 	asoc->nonce_wait_for_ecne = 0;
985 	asoc->nonce_wait_tsn = 0;
986 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
987 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
988 	asoc->pr_sctp_cnt = 0;
989 	asoc->total_output_queue_size = 0;
990 
991 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
992 		struct in6pcb *inp6;
993 
994 		/* Its a V6 socket */
995 		inp6 = (struct in6pcb *)m;
996 		asoc->ipv6_addr_legal = 1;
997 		/* Now look at the binding flag to see if V4 will be legal */
998 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
999 			asoc->ipv4_addr_legal = 1;
1000 		} else {
1001 			/* V4 addresses are NOT legal on the association */
1002 			asoc->ipv4_addr_legal = 0;
1003 		}
1004 	} else {
1005 		/* Its a V4 socket, no - V6 */
1006 		asoc->ipv4_addr_legal = 1;
1007 		asoc->ipv6_addr_legal = 0;
1008 	}
1009 
1010 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1011 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1012 
1013 	asoc->smallest_mtu = m->sctp_frag_point;
1014 #ifdef SCTP_PRINT_FOR_B_AND_M
1015 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1016 	    asoc->smallest_mtu);
1017 #endif
1018 	asoc->minrto = m->sctp_ep.sctp_minrto;
1019 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1020 
1021 	asoc->locked_on_sending = NULL;
1022 	asoc->stream_locked_on = 0;
1023 	asoc->ecn_echo_cnt_onq = 0;
1024 	asoc->stream_locked = 0;
1025 
1026 	asoc->send_sack = 1;
1027 
1028 	LIST_INIT(&asoc->sctp_restricted_addrs);
1029 
1030 	TAILQ_INIT(&asoc->nets);
1031 	TAILQ_INIT(&asoc->pending_reply_queue);
1032 	TAILQ_INIT(&asoc->asconf_ack_sent);
1033 	/* Setup to fill the hb random cache at first HB */
1034 	asoc->hb_random_idx = 4;
1035 
1036 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1037 
1038 	/*
1039 	 * JRS - Pick the default congestion control module based on the
1040 	 * sysctl.
1041 	 */
1042 	switch (m->sctp_ep.sctp_default_cc_module) {
1043 		/* JRS - Standard TCP congestion control */
1044 	case SCTP_CC_RFC2581:
1045 		{
1046 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1047 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1048 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1049 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1050 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1051 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1052 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1053 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1054 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1055 			break;
1056 		}
1057 		/* JRS - High Speed TCP congestion control (Floyd) */
1058 	case SCTP_CC_HSTCP:
1059 		{
1060 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1061 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1063 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1064 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1065 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1066 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1067 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1068 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1069 			break;
1070 		}
1071 		/* JRS - HTCP congestion control */
1072 	case SCTP_CC_HTCP:
1073 		{
1074 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1075 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1077 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1078 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1079 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1080 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1081 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1082 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1083 			break;
1084 		}
1085 		/* JRS - By default, use RFC2581 */
1086 	default:
1087 		{
1088 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1089 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1091 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1092 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1093 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1094 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1095 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1096 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1097 			break;
1098 		}
1099 	}
1100 
1101 	/*
1102 	 * Now the stream parameters, here we allocate space for all streams
1103 	 * that we request by default.
1104 	 */
1105 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1106 	    m->sctp_ep.pre_open_stream_count;
1107 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1108 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1109 	    SCTP_M_STRMO);
1110 	if (asoc->strmout == NULL) {
1111 		/* big trouble no memory */
1112 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1113 		return (ENOMEM);
1114 	}
1115 	for (i = 0; i < asoc->streamoutcnt; i++) {
1116 		/*
1117 		 * inbound side must be set to 0xffff, also NOTE when we get
1118 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1119 		 * count (streamoutcnt) but first check if we sent to any of
1120 		 * the upper streams that were dropped (if some were). Those
1121 		 * that were dropped must be notified to the upper layer as
1122 		 * failed to send.
1123 		 */
1124 		asoc->strmout[i].next_sequence_sent = 0x0;
1125 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1126 		asoc->strmout[i].stream_no = i;
1127 		asoc->strmout[i].last_msg_incomplete = 0;
1128 		asoc->strmout[i].next_spoke.tqe_next = 0;
1129 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1130 	}
1131 	/* Now the mapping array */
1132 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1133 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1134 	    SCTP_M_MAP);
1135 	if (asoc->mapping_array == NULL) {
1136 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1137 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1138 		return (ENOMEM);
1139 	}
1140 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1141 	/* EY  - initialize the nr_mapping_array just like mapping array */
1142 	asoc->nr_mapping_array_size = SCTP_INITIAL_NR_MAPPING_ARRAY;
1143 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->nr_mapping_array_size,
1144 	    SCTP_M_MAP);
1145 	if (asoc->nr_mapping_array == NULL) {
1146 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1147 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1148 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1149 		return (ENOMEM);
1150 	}
1151 	memset(asoc->nr_mapping_array, 0, asoc->nr_mapping_array_size);
1152 
1153 	/* Now the init of the other outqueues */
1154 	TAILQ_INIT(&asoc->free_chunks);
1155 	TAILQ_INIT(&asoc->out_wheel);
1156 	TAILQ_INIT(&asoc->control_send_queue);
1157 	TAILQ_INIT(&asoc->asconf_send_queue);
1158 	TAILQ_INIT(&asoc->send_queue);
1159 	TAILQ_INIT(&asoc->sent_queue);
1160 	TAILQ_INIT(&asoc->reasmqueue);
1161 	TAILQ_INIT(&asoc->resetHead);
1162 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1163 	TAILQ_INIT(&asoc->asconf_queue);
1164 	/* authentication fields */
1165 	asoc->authinfo.random = NULL;
1166 	asoc->authinfo.active_keyid = 0;
1167 	asoc->authinfo.assoc_key = NULL;
1168 	asoc->authinfo.assoc_keyid = 0;
1169 	asoc->authinfo.recv_key = NULL;
1170 	asoc->authinfo.recv_keyid = 0;
1171 	LIST_INIT(&asoc->shared_keys);
1172 	asoc->marked_retrans = 0;
1173 	asoc->timoinit = 0;
1174 	asoc->timodata = 0;
1175 	asoc->timosack = 0;
1176 	asoc->timoshutdown = 0;
1177 	asoc->timoheartbeat = 0;
1178 	asoc->timocookie = 0;
1179 	asoc->timoshutdownack = 0;
1180 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1181 	asoc->discontinuity_time = asoc->start_time;
1182 	/*
1183 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1184 	 * freed later whe the association is freed.
1185 	 */
1186 	return (0);
1187 }
1188 
1189 int
1190 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1191 {
1192 	/* mapping array needs to grow */
1193 	uint8_t *new_array;
1194 	uint32_t new_size;
1195 
1196 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1197 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1198 	if (new_array == NULL) {
1199 		/* can't get more, forget it */
1200 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1201 		    new_size);
1202 		return (-1);
1203 	}
1204 	memset(new_array, 0, new_size);
1205 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1206 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1207 	asoc->mapping_array = new_array;
1208 	asoc->mapping_array_size = new_size;
1209 	if (asoc->peer_supports_nr_sack) {
1210 		new_size = asoc->nr_mapping_array_size + ((needed + 7) / 8 + SCTP_NR_MAPPING_ARRAY_INCR);
1211 		SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1212 		if (new_array == NULL) {
1213 			/* can't get more, forget it */
1214 			SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1215 			    new_size);
1216 			return (-1);
1217 		}
1218 		memset(new_array, 0, new_size);
1219 		memcpy(new_array, asoc->nr_mapping_array, asoc->nr_mapping_array_size);
1220 		SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1221 		asoc->nr_mapping_array = new_array;
1222 		asoc->nr_mapping_array_size = new_size;
1223 	}
1224 	return (0);
1225 }
1226 
1227 
1228 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Core of the thread-based PCB iterator: walk endpoints (and the
 * associations on each) that match the iterator's pcb_flags,
 * pcb_features and asoc_state filters, invoking the caller-supplied
 * function_inp / function_assoc / function_inp_end callbacks.  When the
 * walk finishes, function_atend is called and the iterator itself is
 * freed.  Runs with the global ITERATOR lock held for most of the walk;
 * periodically drops and re-takes it (see SCTP_ITERATOR_MAX_AT_ONCE)
 * so other lock waiters can make progress.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	/* Drop the reference taken when this inp was handed to the iterator. */
	if (it->inp) {
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* The iterator owns itself; free it now that the walk is over. */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/* Skip endpoints whose flags/features don't match the filters. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/*
	 * NOTE(review): write lock is released and a read lock taken with a
	 * window in between — presumably acceptable because the ITERATOR
	 * lock is still held; confirm no true lock-downgrade primitive was
	 * intended here.
	 */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the stcb and inp with refcounts so they survive
			 * while every lock is briefly released, then re-take
			 * the locks in the original order.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		/* The global PCB list lock guards the walk to the next inp. */
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1355 
1356 void
1357 sctp_iterator_worker(void)
1358 {
1359 	struct sctp_iterator *it = NULL;
1360 
1361 	/* This function is called with the WQ lock in place */
1362 
1363 	SCTP_BASE_INFO(iterator_running) = 1;
1364 again:
1365 	it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1366 	while (it) {
1367 		/* now lets work on this one */
1368 		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
1369 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1370 		sctp_iterator_work(it);
1371 		SCTP_IPI_ITERATOR_WQ_LOCK();
1372 		/* sa_ignore FREED_MEMORY */
1373 		it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1374 	}
1375 	if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
1376 		goto again;
1377 	}
1378 	SCTP_BASE_INFO(iterator_running) = 0;
1379 	return;
1380 }
1381 
1382 #endif
1383 
1384 
1385 static void
1386 sctp_handle_addr_wq(void)
1387 {
1388 	/* deal with the ADDR wq from the rtsock calls */
1389 	struct sctp_laddr *wi;
1390 	struct sctp_asconf_iterator *asc;
1391 
1392 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1393 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1394 	if (asc == NULL) {
1395 		/* Try later, no memory */
1396 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1397 		    (struct sctp_inpcb *)NULL,
1398 		    (struct sctp_tcb *)NULL,
1399 		    (struct sctp_nets *)NULL);
1400 		return;
1401 	}
1402 	LIST_INIT(&asc->list_of_work);
1403 	asc->cnt = 0;
1404 	SCTP_IPI_ITERATOR_WQ_LOCK();
1405 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1406 	while (wi != NULL) {
1407 		LIST_REMOVE(wi, sctp_nxt_addr);
1408 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1409 		asc->cnt++;
1410 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1411 	}
1412 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1413 	if (asc->cnt == 0) {
1414 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1415 	} else {
1416 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1417 		    sctp_asconf_iterator_stcb,
1418 		    NULL,	/* No ep end for boundall */
1419 		    SCTP_PCB_FLAGS_BOUNDALL,
1420 		    SCTP_PCB_ANY_FEATURES,
1421 		    SCTP_ASOC_ANY_STATE,
1422 		    (void *)asc, 0,
1423 		    sctp_asconf_iterator_end, NULL, 0);
1424 	}
1425 }
1426 
/*
 * NOTE(review): file-scope (non-static) scratch variables written by the
 * SCTP_TIMER_TYPE_SEND case in sctp_timeout_handler() below — presumably
 * kept global as a debugging aid.  Confirm nothing outside this file
 * links against them before narrowing their linkage or moving them into
 * the handler.
 */
int retcode = 0;
int cur_oerr = 0;
1429 
/*
 * Central callout entry point for every SCTP timer type.  't' points at
 * the struct sctp_timer that was armed (embedded in the endpoint,
 * association or net).  The function first validates the timer (self
 * pointer, type, owning inp/stcb still present), takes the references
 * and locks the expiry action needs, then dispatches on tmr->type.
 * Several cases (ASOCKILL, INPKILL, SHUTDOWNGUARD, and failed
 * retransmit timers) free the association or endpoint and must leave
 * through out_decr/out_no_decr rather than the normal unlock path.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;
	struct sctp_iterator *it = NULL;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		return;
	}
	/* stopped_from breadcrumbs record which check bailed out. */
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
		it = (struct sctp_iterator *)inp;
		inp = NULL;
	}
	/*
	 * Save the type now: the INPKILL/ASOCKILL cases free the structure
	 * containing tmr, but the final debug print still needs the type.
	 */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the assoc while we check it is still alive. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/*
		 * Lock the TCB and drop the temporary refcount; bail if the
		 * assoc is gone or being freed (ASOCKILL must still run).
		 */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		SCTP_STAT_INCR(sctps_timoiterator);
		sctp_iterator_timer(it);
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-ack (SACK) timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			int abort_flag;

			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
				sctp_sack_check(stcb, 0, 0, &abort_flag);

			/*
			 * EY if nr_sacks used then send an nr-sack , a sack
			 * otherwise
			 */
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
				sctp_send_nr_sack(stcb);
			else
				sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* Count reachable-but-unconfirmed destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Tear the association down; stcb is gone afterwards. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_DIRECTLY_NOCMPSET);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
	/* Normal exit: drop the TCB lock, then the inp reference. */
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
}
1906 
/*
 * Arm a timer of the given type for the endpoint (inp), association
 * (stcb) and/or destination (net).  Selects the sctp_timer structure
 * and the timeout appropriate for t_type; if that timer is already
 * pending it is left running unchanged.  Callers passing an stcb must
 * hold the TCB lock.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int to_ticks;
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may run without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	to_ticks = 0;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			struct sctp_iterator *it;

			/*
			 * For this type the caller passes the iterator
			 * cast to an inp pointer.
			 */
			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
			to_ticks = SCTP_ITERATOR_TICKS;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				/* No RTT measured yet: use the initial RTO. */
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp
		 * ususually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint8_t this_random;
			int cnt_of_unconf = 0;
			struct sctp_nets *lnet;

			/* Count reachable but still unconfirmed destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf) {
				net = lnet = NULL;
				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
			}
			if (stcb->asoc.hb_random_idx > 3) {
				/* Refill the 4-byte pool of random jitter values. */
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				memcpy(stcb->asoc.hb_random_values, &rndval,
				    sizeof(stcb->asoc.hb_random_values));
				stcb->asoc.hb_random_idx = 0;
			}
			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
			stcb->asoc.hb_random_idx++;
			stcb->asoc.hb_ect_randombit = 0;
			/*
			 * this_random will be 0 - 255 ms; RTO is in ms.
			 */
			if ((stcb->asoc.hb_is_disabled) &&
			    (cnt_of_unconf == 0)) {
				return;
			}
			if (net) {
				int delay;

				delay = stcb->asoc.heart_beat_delay;
				/*
				 * Unconfirmed, in-scope, reachable addresses
				 * get heartbeated without the configured delay.
				 */
				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
						delay = 0;
					}
				}
				if (net->RTO == 0) {
					/* Never been checked */
					to_ticks = this_random + stcb->asoc.initial_rto + delay;
				} else {
					/* set rto_val to the ms */
					to_ticks = delay + net->RTO + this_random;
				}
			} else {
				if (cnt_of_unconf) {
					to_ticks = this_random + stcb->asoc.initial_rto;
				} else {
					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
				}
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &stcb->asoc.hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was compelete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here ususually about 60
		 * minutes.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		/* Note: strreset_timer is shared with the STRRESET type. */
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU ususually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (inp == NULL)) {
			return;
		}
		if (net == NULL) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_EARLYFR:
		{
			unsigned int msec;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			if (net->flight_size > net->cwnd) {
				/* no need to start */
				return;
			}
			SCTP_STAT_INCR(sctps_earlyfrstart);
			if (net->lastsa == 0) {
				/* Hmm no rtt estimate yet? */
				msec = stcb->asoc.initial_rto >> 2;
			} else {
				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
			}
			/* Clamp to the configured floor for early FR. */
			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
				if (msec < SCTP_MINFR_MSEC_FLOOR) {
					msec = SCTP_MINFR_MSEC_FLOOR;
				}
			}
			to_ticks = MSEC_TO_TICKS(msec);
			tmr = &net->fr_timer;
		}
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		return;
		break;
	};
	/* Sanity: every valid case must have produced a timer and delay. */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __FUNCTION__, t_type, to_ticks, tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2269 
/*
 * Stop (cancel) a pending timer of the given type.  The requested type
 * is cross-checked against the type recorded in the timer structure so
 * that a timer structure shared between several types (signature_change
 * for NEWCOOKIE/INPKILL, strreset_timer for STRRESET/ASOCKILL) is not
 * stopped by the wrong caller.  'from' records the caller's location
 * for debugging (stored in tmr->stopped_from).
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may run without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Locate the timer structure for this timer type. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			struct sctp_iterator *it;

			/*
			 * For this type the caller passes the iterator
			 * cast to an inp pointer.
			 */
			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the outstanding SEND-timer count consistent (never < 0). */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2441 
2442 uint32_t
2443 sctp_calculate_len(struct mbuf *m)
2444 {
2445 	uint32_t tlen = 0;
2446 	struct mbuf *at;
2447 
2448 	at = m;
2449 	while (at) {
2450 		tlen += SCTP_BUF_LEN(at);
2451 		at = SCTP_BUF_NEXT(at);
2452 	}
2453 	return (tlen);
2454 }
2455 
2456 void
2457 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2458     struct sctp_association *asoc, uint32_t mtu)
2459 {
2460 	/*
2461 	 * Reset the P-MTU size on this association, this involves changing
2462 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2463 	 * allow the DF flag to be cleared.
2464 	 */
2465 	struct sctp_tmit_chunk *chk;
2466 	unsigned int eff_mtu, ovh;
2467 
2468 #ifdef SCTP_PRINT_FOR_B_AND_M
2469 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2470 	    inp, asoc, mtu);
2471 #endif
2472 	asoc->smallest_mtu = mtu;
2473 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2474 		ovh = SCTP_MIN_OVERHEAD;
2475 	} else {
2476 		ovh = SCTP_MIN_V4_OVERHEAD;
2477 	}
2478 	eff_mtu = mtu - ovh;
2479 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2480 
2481 		if (chk->send_size > eff_mtu) {
2482 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2483 		}
2484 	}
2485 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2486 		if (chk->send_size > eff_mtu) {
2487 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2488 		}
2489 	}
2490 }
2491 
2492 
2493 /*
2494  * given an association and starting time of the current RTT period return
2495  * RTO in number of msecs net should point to the current network
2496  */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	/*
	 * On strict-alignment platforms the caller may hand us a
	 * potentially misaligned timeval; sctp_align_unsafe_makecopy
	 * requests a local aligned copy before we read it.
	 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value */
	/* calc_time becomes the elapsed time (now - old) in milliseconds */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/* Record the raw measurement before smoothing consumes calc_time. */
	net->rtt = o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR (lastsa stores SRTT scaled by 1<<SCTP_RTT_SHIFT). */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	/*
	 * Latch satellite-network mode the first time the RTO exceeds the
	 * threshold; once cleared again it is locked out for the assoc.
	 */
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2622 
2623 /*
2624  * return a pointer to a contiguous piece of data from the given mbuf chain
2625  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2626  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2627  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2628  */
2629 caddr_t
2630 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2631 {
2632 	uint32_t count;
2633 	uint8_t *ptr;
2634 
2635 	ptr = in_ptr;
2636 	if ((off < 0) || (len <= 0))
2637 		return (NULL);
2638 
2639 	/* find the desired start location */
2640 	while ((m != NULL) && (off > 0)) {
2641 		if (off < SCTP_BUF_LEN(m))
2642 			break;
2643 		off -= SCTP_BUF_LEN(m);
2644 		m = SCTP_BUF_NEXT(m);
2645 	}
2646 	if (m == NULL)
2647 		return (NULL);
2648 
2649 	/* is the current mbuf large enough (eg. contiguous)? */
2650 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2651 		return (mtod(m, caddr_t)+off);
2652 	} else {
2653 		/* else, it spans more than one mbuf, so save a temp copy... */
2654 		while ((m != NULL) && (len > 0)) {
2655 			count = min(SCTP_BUF_LEN(m) - off, len);
2656 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2657 			len -= count;
2658 			ptr += count;
2659 			off = 0;
2660 			m = SCTP_BUF_NEXT(m);
2661 		}
2662 		if ((m == NULL) && (len > 0))
2663 			return (NULL);
2664 		else
2665 			return ((caddr_t)in_ptr);
2666 	}
2667 }
2668 
2669 
2670 
2671 struct sctp_paramhdr *
2672 sctp_get_next_param(struct mbuf *m,
2673     int offset,
2674     struct sctp_paramhdr *pull,
2675     int pull_limit)
2676 {
2677 	/* This just provides a typed signature to Peter's Pull routine */
2678 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2679 	    (uint8_t *) pull));
2680 }
2681 
2682 
2683 int
2684 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2685 {
2686 	/*
2687 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2688 	 * padlen is > 3 this routine will fail.
2689 	 */
2690 	uint8_t *dp;
2691 	int i;
2692 
2693 	if (padlen > 3) {
2694 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2695 		return (ENOBUFS);
2696 	}
2697 	if (padlen <= M_TRAILINGSPACE(m)) {
2698 		/*
2699 		 * The easy way. We hope the majority of the time we hit
2700 		 * here :)
2701 		 */
2702 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2703 		SCTP_BUF_LEN(m) += padlen;
2704 	} else {
2705 		/* Hard way we must grow the mbuf */
2706 		struct mbuf *tmp;
2707 
2708 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2709 		if (tmp == NULL) {
2710 			/* Out of space GAK! we are in big trouble. */
2711 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2712 			return (ENOSPC);
2713 		}
2714 		/* setup and insert in middle */
2715 		SCTP_BUF_LEN(tmp) = padlen;
2716 		SCTP_BUF_NEXT(tmp) = NULL;
2717 		SCTP_BUF_NEXT(m) = tmp;
2718 		dp = mtod(tmp, uint8_t *);
2719 	}
2720 	/* zero out the pad */
2721 	for (i = 0; i < padlen; i++) {
2722 		*dp = 0;
2723 		dp++;
2724 	}
2725 	return (0);
2726 }
2727 
2728 int
2729 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2730 {
2731 	/* find the last mbuf in chain and pad it */
2732 	struct mbuf *m_at;
2733 
2734 	m_at = m;
2735 	if (last_mbuf) {
2736 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2737 	} else {
2738 		while (m_at) {
2739 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2740 				return (sctp_add_pad_tombuf(m_at, padval));
2741 			}
2742 			m_at = SCTP_BUF_NEXT(m_at);
2743 		}
2744 	}
2745 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2746 	return (EFAULT);
2747 }
2748 
/*
 * Debug counter: incremented by sctp_notify_assoc_change() each time
 * sleepers are woken for a lost/failed association on a TCP-model or
 * connected one-to-many socket.
 */
int sctp_asoc_change_wake = 0;
2750 
/*
 * Queue an SCTP_ASSOC_CHANGE notification on the socket's receive
 * buffer.  For TCP-model and connected one-to-many sockets, a COMM_LOST
 * or CANT_STR_ASSOC event additionally sets so_error (ECONNREFUSED if
 * still in COOKIE_WAIT, ECONNRESET otherwise) and wakes any sleepers.
 * 'so_locked' tells us whether the caller already holds the socket lock
 * (only meaningful on platforms that need the lock-juggling below).
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Take a refcount and drop the TCB lock so the socket lock
		 * can be acquired in the correct order, then re-take the
		 * TCB lock.  Bail out if the socket closed meanwhile.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Fill in the sctp_assoc_change notification body. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* Same refcount/lock-order dance as above. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2867 
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification carrying the affected
 * peer address 'sa', the new address 'state' and an 'error' code, if
 * the application enabled SCTP_PCB_FLAGS_RECVPADDREVNT.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address, normalizing any IPv6 link-local scope. */
	switch (sa->sa_family) {
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
2940 
2941 
/*
 * Queue an SCTP_SEND_FAILED notification for a data chunk that was
 * never sent (SCTP_DATA_UNSENT) or sent but not delivered
 * (SCTP_DATA_SENT).  The chunk's data mbufs are stolen (chk->data is
 * set to NULL) and chained onto the notification after trimming the
 * SCTP data-chunk header.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * NOTE(review): the data-chunk header is subtracted from 'length'
	 * unconditionally here, while chk->send_size below is only trimmed
	 * when it is at least sizeof(struct sctp_data_chunk) — verify the
	 * reported ssf_length for undersized chunks.
	 */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3021 
3022 
3023 static void
3024 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3025     struct sctp_stream_queue_pending *sp, int so_locked
3026 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3027     SCTP_UNUSED
3028 #endif
3029 )
3030 {
3031 	struct mbuf *m_notify;
3032 	struct sctp_send_failed *ssf;
3033 	struct sctp_queued_to_read *control;
3034 	int length;
3035 
3036 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
3037 		/* event not enabled */
3038 		return;
3039 	}
3040 	length = sizeof(struct sctp_send_failed) + sp->length;
3041 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
3042 	if (m_notify == NULL)
3043 		/* no space left */
3044 		return;
3045 	SCTP_BUF_LEN(m_notify) = 0;
3046 	ssf = mtod(m_notify, struct sctp_send_failed *);
3047 	ssf->ssf_type = SCTP_SEND_FAILED;
3048 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
3049 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3050 	else
3051 		ssf->ssf_flags = SCTP_DATA_SENT;
3052 	ssf->ssf_length = length;
3053 	ssf->ssf_error = error;
3054 	/* not exactly what the user sent in, but should be close :) */
3055 	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3056 	ssf->ssf_info.sinfo_stream = sp->stream;
3057 	ssf->ssf_info.sinfo_ssn = sp->strseq;
3058 	if (sp->some_taken) {
3059 		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3060 	} else {
3061 		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3062 	}
3063 	ssf->ssf_info.sinfo_ppid = sp->ppid;
3064 	ssf->ssf_info.sinfo_context = sp->context;
3065 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3066 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
3067 	SCTP_BUF_NEXT(m_notify) = sp->data;
3068 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3069 
3070 	/* Steal off the mbuf */
3071 	sp->data = NULL;
3072 	/*
3073 	 * For this case, we check the actual socket buffer, since the assoc
3074 	 * is going away we don't want to overfill the socket buffer for a
3075 	 * non-reader
3076 	 */
3077 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3078 		sctp_m_freem(m_notify);
3079 		return;
3080 	}
3081 	/* append to socket */
3082 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3083 	    0, 0, 0, 0, 0, 0,
3084 	    m_notify);
3085 	if (control == NULL) {
3086 		/* no memory */
3087 		sctp_m_freem(m_notify);
3088 		return;
3089 	}
3090 	control->spec_flags = M_NOTIFICATION;
3091 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3092 	    control,
3093 	    &stcb->sctp_socket->so_rcv, 1, so_locked);
3094 }
3095 
3096 
3097 
3098 static void
3099 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3100     uint32_t error)
3101 {
3102 	struct mbuf *m_notify;
3103 	struct sctp_adaptation_event *sai;
3104 	struct sctp_queued_to_read *control;
3105 
3106 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3107 		/* event not enabled */
3108 		return;
3109 	}
3110 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3111 	if (m_notify == NULL)
3112 		/* no space left */
3113 		return;
3114 	SCTP_BUF_LEN(m_notify) = 0;
3115 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3116 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3117 	sai->sai_flags = 0;
3118 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3119 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3120 	sai->sai_assoc_id = sctp_get_associd(stcb);
3121 
3122 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3123 	SCTP_BUF_NEXT(m_notify) = NULL;
3124 
3125 	/* append to socket */
3126 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3127 	    0, 0, 0, 0, 0, 0,
3128 	    m_notify);
3129 	if (control == NULL) {
3130 		/* no memory */
3131 		sctp_m_freem(m_notify);
3132 		return;
3133 	}
3134 	control->length = SCTP_BUF_LEN(m_notify);
3135 	control->spec_flags = M_NOTIFICATION;
3136 	/* not that we need this */
3137 	control->tail_mbuf = m_notify;
3138 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3139 	    control,
3140 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3141 }
3142 
3143 /* This always must be called with the read-queue LOCKED in the INP */
3144 void
3145 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3146     int nolock, uint32_t val)
3147 {
3148 	struct mbuf *m_notify;
3149 	struct sctp_pdapi_event *pdapi;
3150 	struct sctp_queued_to_read *control;
3151 	struct sockbuf *sb;
3152 
3153 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3154 		/* event not enabled */
3155 		return;
3156 	}
3157 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3158 	if (m_notify == NULL)
3159 		/* no space left */
3160 		return;
3161 	SCTP_BUF_LEN(m_notify) = 0;
3162 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3163 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3164 	pdapi->pdapi_flags = 0;
3165 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3166 	pdapi->pdapi_indication = error;
3167 	pdapi->pdapi_stream = (val >> 16);
3168 	pdapi->pdapi_seq = (val & 0x0000ffff);
3169 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3170 
3171 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3172 	SCTP_BUF_NEXT(m_notify) = NULL;
3173 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3174 	    0, 0, 0, 0, 0, 0,
3175 	    m_notify);
3176 	if (control == NULL) {
3177 		/* no memory */
3178 		sctp_m_freem(m_notify);
3179 		return;
3180 	}
3181 	control->spec_flags = M_NOTIFICATION;
3182 	control->length = SCTP_BUF_LEN(m_notify);
3183 	/* not that we need this */
3184 	control->tail_mbuf = m_notify;
3185 	control->held_length = 0;
3186 	control->length = 0;
3187 	if (nolock == 0) {
3188 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
3189 	}
3190 	sb = &stcb->sctp_socket->so_rcv;
3191 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3192 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3193 	}
3194 	sctp_sballoc(stcb, sb, m_notify);
3195 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3196 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3197 	}
3198 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3199 	control->end_added = 1;
3200 	if (stcb->asoc.control_pdapi)
3201 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3202 	else {
3203 		/* we really should not see this case */
3204 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3205 	}
3206 	if (nolock == 0) {
3207 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
3208 	}
3209 	if (stcb->sctp_ep && stcb->sctp_socket) {
3210 		/* This should always be the case */
3211 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3212 	}
3213 }
3214 
/*
 * Queue an SCTP_SHUTDOWN_EVENT notification when the peer's SHUTDOWN
 * has been received.  For one-to-one style (TCP model) sockets the
 * send side is additionally marked closed via socantsendmore().
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock-order dance: hold a refcount on the assoc, drop the
		 * TCB lock, take the socket lock, then re-take the TCB
		 * lock and drop the reference.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* The socket closed while the TCB lock was dropped. */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3282 
3283 static void
3284 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3285     int so_locked
3286 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3287     SCTP_UNUSED
3288 #endif
3289 )
3290 {
3291 	struct mbuf *m_notify;
3292 	struct sctp_sender_dry_event *event;
3293 	struct sctp_queued_to_read *control;
3294 
3295 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3296 		/* event not enabled */
3297 		return;
3298 	}
3299 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3300 	if (m_notify == NULL) {
3301 		/* no space left */
3302 		return;
3303 	}
3304 	SCTP_BUF_LEN(m_notify) = 0;
3305 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3306 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3307 	event->sender_dry_flags = 0;
3308 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3309 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3310 
3311 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3312 	SCTP_BUF_NEXT(m_notify) = NULL;
3313 
3314 	/* append to socket */
3315 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3316 	    0, 0, 0, 0, 0, 0, m_notify);
3317 	if (control == NULL) {
3318 		/* no memory */
3319 		sctp_m_freem(m_notify);
3320 		return;
3321 	}
3322 	control->length = SCTP_BUF_LEN(m_notify);
3323 	control->spec_flags = M_NOTIFICATION;
3324 	/* not that we need this */
3325 	control->tail_mbuf = m_notify;
3326 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3327 	    &stcb->sctp_socket->so_rcv, 1, so_locked);
3328 }
3329 
3330 
3331 static void
3332 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3333 {
3334 	struct mbuf *m_notify;
3335 	struct sctp_queued_to_read *control;
3336 	struct sctp_stream_reset_event *strreset;
3337 	int len;
3338 
3339 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3340 		/* event not enabled */
3341 		return;
3342 	}
3343 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3344 	if (m_notify == NULL)
3345 		/* no space left */
3346 		return;
3347 	SCTP_BUF_LEN(m_notify) = 0;
3348 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3349 	if (len > M_TRAILINGSPACE(m_notify)) {
3350 		/* never enough room */
3351 		sctp_m_freem(m_notify);
3352 		return;
3353 	}
3354 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3355 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3356 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3357 	strreset->strreset_length = len;
3358 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3359 	strreset->strreset_list[0] = number_entries;
3360 
3361 	SCTP_BUF_LEN(m_notify) = len;
3362 	SCTP_BUF_NEXT(m_notify) = NULL;
3363 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3364 		/* no space */
3365 		sctp_m_freem(m_notify);
3366 		return;
3367 	}
3368 	/* append to socket */
3369 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3370 	    0, 0, 0, 0, 0, 0,
3371 	    m_notify);
3372 	if (control == NULL) {
3373 		/* no memory */
3374 		sctp_m_freem(m_notify);
3375 		return;
3376 	}
3377 	control->spec_flags = M_NOTIFICATION;
3378 	control->length = SCTP_BUF_LEN(m_notify);
3379 	/* not that we need this */
3380 	control->tail_mbuf = m_notify;
3381 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3382 	    control,
3383 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3384 }
3385 
3386 
3387 static void
3388 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3389     int number_entries, uint16_t * list, int flag)
3390 {
3391 	struct mbuf *m_notify;
3392 	struct sctp_queued_to_read *control;
3393 	struct sctp_stream_reset_event *strreset;
3394 	int len;
3395 
3396 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3397 		/* event not enabled */
3398 		return;
3399 	}
3400 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3401 	if (m_notify == NULL)
3402 		/* no space left */
3403 		return;
3404 	SCTP_BUF_LEN(m_notify) = 0;
3405 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3406 	if (len > M_TRAILINGSPACE(m_notify)) {
3407 		/* never enough room */
3408 		sctp_m_freem(m_notify);
3409 		return;
3410 	}
3411 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3412 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3413 	if (number_entries == 0) {
3414 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3415 	} else {
3416 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3417 	}
3418 	strreset->strreset_length = len;
3419 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3420 	if (number_entries) {
3421 		int i;
3422 
3423 		for (i = 0; i < number_entries; i++) {
3424 			strreset->strreset_list[i] = ntohs(list[i]);
3425 		}
3426 	}
3427 	SCTP_BUF_LEN(m_notify) = len;
3428 	SCTP_BUF_NEXT(m_notify) = NULL;
3429 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3430 		/* no space */
3431 		sctp_m_freem(m_notify);
3432 		return;
3433 	}
3434 	/* append to socket */
3435 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3436 	    0, 0, 0, 0, 0, 0,
3437 	    m_notify);
3438 	if (control == NULL) {
3439 		/* no memory */
3440 		sctp_m_freem(m_notify);
3441 		return;
3442 	}
3443 	control->spec_flags = M_NOTIFICATION;
3444 	control->length = SCTP_BUF_LEN(m_notify);
3445 	/* not that we need this */
3446 	control->tail_mbuf = m_notify;
3447 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3448 	    control,
3449 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3450 }
3451 
3452 
3453 void
3454 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3455     uint32_t error, void *data, int so_locked
3456 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3457     SCTP_UNUSED
3458 #endif
3459 )
3460 {
3461 	if ((stcb == NULL) ||
3462 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3463 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3464 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3465 		/* If the socket is gone we are out of here */
3466 		return;
3467 	}
3468 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3469 		return;
3470 	}
3471 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3472 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3473 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3474 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3475 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3476 			/* Don't report these in front states */
3477 			return;
3478 		}
3479 	}
3480 	switch (notification) {
3481 	case SCTP_NOTIFY_ASSOC_UP:
3482 		if (stcb->asoc.assoc_up_sent == 0) {
3483 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3484 			stcb->asoc.assoc_up_sent = 1;
3485 		}
3486 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3487 			sctp_notify_adaptation_layer(stcb, error);
3488 		}
3489 		if (stcb->asoc.peer_supports_auth == 0) {
3490 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3491 			    NULL, so_locked);
3492 		}
3493 		break;
3494 	case SCTP_NOTIFY_ASSOC_DOWN:
3495 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3496 		break;
3497 	case SCTP_NOTIFY_INTERFACE_DOWN:
3498 		{
3499 			struct sctp_nets *net;
3500 
3501 			net = (struct sctp_nets *)data;
3502 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3503 			    (struct sockaddr *)&net->ro._l_addr, error);
3504 			break;
3505 		}
3506 	case SCTP_NOTIFY_INTERFACE_UP:
3507 		{
3508 			struct sctp_nets *net;
3509 
3510 			net = (struct sctp_nets *)data;
3511 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3512 			    (struct sockaddr *)&net->ro._l_addr, error);
3513 			break;
3514 		}
3515 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3516 		{
3517 			struct sctp_nets *net;
3518 
3519 			net = (struct sctp_nets *)data;
3520 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3521 			    (struct sockaddr *)&net->ro._l_addr, error);
3522 			break;
3523 		}
3524 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3525 		sctp_notify_send_failed2(stcb, error,
3526 		    (struct sctp_stream_queue_pending *)data, so_locked);
3527 		break;
3528 	case SCTP_NOTIFY_DG_FAIL:
3529 		sctp_notify_send_failed(stcb, error,
3530 		    (struct sctp_tmit_chunk *)data, so_locked);
3531 		break;
3532 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3533 		{
3534 			uint32_t val;
3535 
3536 			val = *((uint32_t *) data);
3537 
3538 			sctp_notify_partial_delivery_indication(stcb, error, 0, val);
3539 		}
3540 		break;
3541 	case SCTP_NOTIFY_STRDATA_ERR:
3542 		break;
3543 	case SCTP_NOTIFY_ASSOC_ABORTED:
3544 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3545 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3546 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3547 		} else {
3548 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3549 		}
3550 		break;
3551 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3552 		break;
3553 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3554 		break;
3555 	case SCTP_NOTIFY_ASSOC_RESTART:
3556 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3557 		if (stcb->asoc.peer_supports_auth == 0) {
3558 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3559 			    NULL, so_locked);
3560 		}
3561 		break;
3562 	case SCTP_NOTIFY_HB_RESP:
3563 		break;
3564 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3565 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3566 		break;
3567 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3568 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3569 		break;
3570 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3571 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3572 		break;
3573 
3574 	case SCTP_NOTIFY_STR_RESET_SEND:
3575 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3576 		break;
3577 	case SCTP_NOTIFY_STR_RESET_RECV:
3578 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3579 		break;
3580 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3581 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3582 		break;
3583 
3584 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3585 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3586 		break;
3587 
3588 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3589 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3590 		    error);
3591 		break;
3592 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3593 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3594 		    error);
3595 		break;
3596 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3597 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3598 		    error);
3599 		break;
3600 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3601 		break;
3602 	case SCTP_NOTIFY_ASCONF_FAILED:
3603 		break;
3604 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3605 		sctp_notify_shutdown_event(stcb);
3606 		break;
3607 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3608 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3609 		    (uint16_t) (uintptr_t) data,
3610 		    so_locked);
3611 		break;
3612 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3613 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3614 		    (uint16_t) (uintptr_t) data,
3615 		    so_locked);
3616 		break;
3617 	case SCTP_NOTIFY_NO_PEER_AUTH:
3618 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3619 		    (uint16_t) (uintptr_t) data,
3620 		    so_locked);
3621 		break;
3622 	case SCTP_NOTIFY_SENDER_DRY:
3623 		sctp_notify_sender_dry_event(stcb, so_locked);
3624 		break;
3625 	default:
3626 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3627 		    __FUNCTION__, notification, notification);
3628 		break;
3629 	}			/* end switch */
3630 }
3631 
3632 void
3633 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3634 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3635     SCTP_UNUSED
3636 #endif
3637 )
3638 {
3639 	struct sctp_association *asoc;
3640 	struct sctp_stream_out *outs;
3641 	struct sctp_tmit_chunk *chk;
3642 	struct sctp_stream_queue_pending *sp;
3643 	int i;
3644 
3645 	asoc = &stcb->asoc;
3646 
3647 	if (stcb == NULL) {
3648 		return;
3649 	}
3650 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3651 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3652 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3653 		return;
3654 	}
3655 	/* now through all the gunk freeing chunks */
3656 	if (holds_lock == 0) {
3657 		SCTP_TCB_SEND_LOCK(stcb);
3658 	}
3659 	/* sent queue SHOULD be empty */
3660 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3661 		chk = TAILQ_FIRST(&asoc->sent_queue);
3662 		while (chk) {
3663 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3664 			asoc->sent_queue_cnt--;
3665 			if (chk->data != NULL) {
3666 				sctp_free_bufspace(stcb, asoc, chk, 1);
3667 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3668 				    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3669 				sctp_m_freem(chk->data);
3670 				chk->data = NULL;
3671 			}
3672 			sctp_free_a_chunk(stcb, chk);
3673 			/* sa_ignore FREED_MEMORY */
3674 			chk = TAILQ_FIRST(&asoc->sent_queue);
3675 		}
3676 	}
3677 	/* pending send queue SHOULD be empty */
3678 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3679 		chk = TAILQ_FIRST(&asoc->send_queue);
3680 		while (chk) {
3681 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3682 			asoc->send_queue_cnt--;
3683 			if (chk->data != NULL) {
3684 				sctp_free_bufspace(stcb, asoc, chk, 1);
3685 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3686 				    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3687 				sctp_m_freem(chk->data);
3688 				chk->data = NULL;
3689 			}
3690 			sctp_free_a_chunk(stcb, chk);
3691 			/* sa_ignore FREED_MEMORY */
3692 			chk = TAILQ_FIRST(&asoc->send_queue);
3693 		}
3694 	}
3695 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3696 		/* For each stream */
3697 		outs = &stcb->asoc.strmout[i];
3698 		/* clean up any sends there */
3699 		stcb->asoc.locked_on_sending = NULL;
3700 		sp = TAILQ_FIRST(&outs->outqueue);
3701 		while (sp) {
3702 			stcb->asoc.stream_queue_cnt--;
3703 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3704 			sctp_free_spbufspace(stcb, asoc, sp);
3705 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3706 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3707 			if (sp->data) {
3708 				sctp_m_freem(sp->data);
3709 				sp->data = NULL;
3710 			}
3711 			if (sp->net)
3712 				sctp_free_remote_addr(sp->net);
3713 			sp->net = NULL;
3714 			/* Free the chunk */
3715 			sctp_free_a_strmoq(stcb, sp);
3716 			/* sa_ignore FREED_MEMORY */
3717 			sp = TAILQ_FIRST(&outs->outqueue);
3718 		}
3719 	}
3720 
3721 	if (holds_lock == 0) {
3722 		SCTP_TCB_SEND_UNLOCK(stcb);
3723 	}
3724 }
3725 
3726 void
3727 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3728 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3729     SCTP_UNUSED
3730 #endif
3731 )
3732 {
3733 
3734 	if (stcb == NULL) {
3735 		return;
3736 	}
3737 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3738 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3739 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3740 		return;
3741 	}
3742 	/* Tell them we lost the asoc */
3743 	sctp_report_all_outbound(stcb, 1, so_locked);
3744 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3745 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3746 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3747 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3748 	}
3749 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3750 }
3751 
/*
 * Abort an association in response to an inbound packet: notify the
 * application (if a TCB exists), send an ABORT to the peer built from
 * the offending packet 'm', and free the TCB (or, lacking one, a
 * lingering inpcb marked SOCKET_GONE).  Called with SO_NOT_LOCKED
 * semantics; on Apple/lock-testing builds the socket lock is acquired
 * around the free.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: hold a refcount, drop the TCB lock,
		 * take the socket lock, then re-take the TCB lock before
		 * freeing the association.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		/* No TCB: clean up a dangling inpcb if its socket is gone. */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3797 
3798 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the association's inbound and outbound TSN
 * history rings to the console.  Compiled only under
 * SCTP_ASOCLOG_OF_TSNS, and the body is further gated by the
 * (misspelled) NOSIY_PRINTS macro, so by default this is an empty
 * stub.  Each ring is printed in chronological order: the wrapped
 * tail (from the current index to the end) first, then the head.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* older entries: from the current index to the ring's end */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* newer entries: from the ring's start to the current index */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3859 
3860 #endif
3861 
/*
 * Abort an existing association from the local side: notify the
 * application, send an ABORT chunk (with optional cause 'op_err') to
 * the peer, update abort statistics, and free the TCB.  'so_locked'
 * indicates whether the caller already holds the socket lock (used on
 * Apple/lock-testing builds to decide whether to acquire it here).
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last assoc gone; release the lingering inpcb */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * If the caller did not hold the socket lock, acquire it with the
	 * refcount/unlock/lock dance to respect lock ordering.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3927 
3928 void
3929 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3930     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3931 {
3932 	struct sctp_chunkhdr *ch, chunk_buf;
3933 	unsigned int chk_length;
3934 
3935 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3936 	/* Generate a TO address for future reference */
3937 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3938 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3939 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3940 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3941 		}
3942 	}
3943 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3944 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3945 	while (ch != NULL) {
3946 		chk_length = ntohs(ch->chunk_length);
3947 		if (chk_length < sizeof(*ch)) {
3948 			/* break to abort land */
3949 			break;
3950 		}
3951 		switch (ch->chunk_type) {
3952 		case SCTP_COOKIE_ECHO:
3953 			/* We hit here only if the assoc is being freed */
3954 			return;
3955 		case SCTP_PACKET_DROPPED:
3956 			/* we don't respond to pkt-dropped */
3957 			return;
3958 		case SCTP_ABORT_ASSOCIATION:
3959 			/* we don't respond with an ABORT to an ABORT */
3960 			return;
3961 		case SCTP_SHUTDOWN_COMPLETE:
3962 			/*
3963 			 * we ignore it since we are not waiting for it and
3964 			 * peer is gone
3965 			 */
3966 			return;
3967 		case SCTP_SHUTDOWN_ACK:
3968 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
3969 			return;
3970 		default:
3971 			break;
3972 		}
3973 		offset += SCTP_SIZE32(chk_length);
3974 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3975 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3976 	}
3977 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
3978 }
3979 
3980 /*
3981  * check the inbound datagram to make sure there is not an abort inside it,
3982  * if there is return 1, else return 0.
3983  */
3984 int
3985 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3986 {
3987 	struct sctp_chunkhdr *ch;
3988 	struct sctp_init_chunk *init_chk, chunk_buf;
3989 	int offset;
3990 	unsigned int chk_length;
3991 
3992 	offset = iphlen + sizeof(struct sctphdr);
3993 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3994 	    (uint8_t *) & chunk_buf);
3995 	while (ch != NULL) {
3996 		chk_length = ntohs(ch->chunk_length);
3997 		if (chk_length < sizeof(*ch)) {
3998 			/* packet is probably corrupt */
3999 			break;
4000 		}
4001 		/* we seem to be ok, is it an abort? */
4002 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4003 			/* yep, tell them */
4004 			return (1);
4005 		}
4006 		if (ch->chunk_type == SCTP_INITIATION) {
4007 			/* need to update the Vtag */
4008 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4009 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4010 			if (init_chk != NULL) {
4011 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4012 			}
4013 		}
4014 		/* Nope, move to the next chunk */
4015 		offset += SCTP_SIZE32(chk_length);
4016 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4017 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4018 	}
4019 	return (0);
4020 }
4021 
4022 /*
4023  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4024  * set (i.e. it's 0) so, create this function to compare link local scopes
4025  */
4026 #ifdef INET6
4027 uint32_t
4028 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4029 {
4030 	struct sockaddr_in6 a, b;
4031 
4032 	/* save copies */
4033 	a = *addr1;
4034 	b = *addr2;
4035 
4036 	if (a.sin6_scope_id == 0)
4037 		if (sa6_recoverscope(&a)) {
4038 			/* can't get scope, so can't match */
4039 			return (0);
4040 		}
4041 	if (b.sin6_scope_id == 0)
4042 		if (sa6_recoverscope(&b)) {
4043 			/* can't get scope, so can't match */
4044 			return (0);
4045 		}
4046 	if (a.sin6_scope_id != b.sin6_scope_id)
4047 		return (0);
4048 
4049 	return (1);
4050 }
4051 
4052 /*
4053  * returns a sockaddr_in6 with embedded scope recovered and removed
4054  */
4055 struct sockaddr_in6 *
4056 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4057 {
4058 	/* check and strip embedded scope junk */
4059 	if (addr->sin6_family == AF_INET6) {
4060 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4061 			if (addr->sin6_scope_id == 0) {
4062 				*store = *addr;
4063 				if (!sa6_recoverscope(store)) {
4064 					/* use the recovered scope */
4065 					addr = store;
4066 				}
4067 			} else {
4068 				/* else, return the original "to" addr */
4069 				in6_clearscope(&addr->sin6_addr);
4070 			}
4071 		}
4072 	}
4073 	return (addr);
4074 }
4075 
4076 #endif
4077 
4078 /*
4079  * are the two addresses the same?  currently a "scopeless" check returns: 1
4080  * if same, 0 if not
4081  */
4082 int
4083 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4084 {
4085 
4086 	/* must be valid */
4087 	if (sa1 == NULL || sa2 == NULL)
4088 		return (0);
4089 
4090 	/* must be the same family */
4091 	if (sa1->sa_family != sa2->sa_family)
4092 		return (0);
4093 
4094 	switch (sa1->sa_family) {
4095 #ifdef INET6
4096 	case AF_INET6:
4097 		{
4098 			/* IPv6 addresses */
4099 			struct sockaddr_in6 *sin6_1, *sin6_2;
4100 
4101 			sin6_1 = (struct sockaddr_in6 *)sa1;
4102 			sin6_2 = (struct sockaddr_in6 *)sa2;
4103 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4104 			    sin6_2));
4105 		}
4106 #endif
4107 	case AF_INET:
4108 		{
4109 			/* IPv4 addresses */
4110 			struct sockaddr_in *sin_1, *sin_2;
4111 
4112 			sin_1 = (struct sockaddr_in *)sa1;
4113 			sin_2 = (struct sockaddr_in *)sa2;
4114 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4115 		}
4116 	default:
4117 		/* we don't do these... */
4118 		return (0);
4119 	}
4120 }
4121 
4122 void
4123 sctp_print_address(struct sockaddr *sa)
4124 {
4125 #ifdef INET6
4126 	char ip6buf[INET6_ADDRSTRLEN];
4127 
4128 	ip6buf[0] = 0;
4129 #endif
4130 
4131 	switch (sa->sa_family) {
4132 #ifdef INET6
4133 	case AF_INET6:
4134 		{
4135 			struct sockaddr_in6 *sin6;
4136 
4137 			sin6 = (struct sockaddr_in6 *)sa;
4138 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4139 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4140 			    ntohs(sin6->sin6_port),
4141 			    sin6->sin6_scope_id);
4142 			break;
4143 		}
4144 #endif
4145 	case AF_INET:
4146 		{
4147 			struct sockaddr_in *sin;
4148 			unsigned char *p;
4149 
4150 			sin = (struct sockaddr_in *)sa;
4151 			p = (unsigned char *)&sin->sin_addr;
4152 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4153 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4154 			break;
4155 		}
4156 	default:
4157 		SCTP_PRINTF("?\n");
4158 		break;
4159 	}
4160 }
4161 
4162 void
4163 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4164 {
4165 	switch (iph->ip_v) {
4166 		case IPVERSION:
4167 		{
4168 			struct sockaddr_in lsa, fsa;
4169 
4170 			bzero(&lsa, sizeof(lsa));
4171 			lsa.sin_len = sizeof(lsa);
4172 			lsa.sin_family = AF_INET;
4173 			lsa.sin_addr = iph->ip_src;
4174 			lsa.sin_port = sh->src_port;
4175 			bzero(&fsa, sizeof(fsa));
4176 			fsa.sin_len = sizeof(fsa);
4177 			fsa.sin_family = AF_INET;
4178 			fsa.sin_addr = iph->ip_dst;
4179 			fsa.sin_port = sh->dest_port;
4180 			SCTP_PRINTF("src: ");
4181 			sctp_print_address((struct sockaddr *)&lsa);
4182 			SCTP_PRINTF("dest: ");
4183 			sctp_print_address((struct sockaddr *)&fsa);
4184 			break;
4185 		}
4186 #ifdef INET6
4187 	case IPV6_VERSION >> 4:
4188 		{
4189 			struct ip6_hdr *ip6;
4190 			struct sockaddr_in6 lsa6, fsa6;
4191 
4192 			ip6 = (struct ip6_hdr *)iph;
4193 			bzero(&lsa6, sizeof(lsa6));
4194 			lsa6.sin6_len = sizeof(lsa6);
4195 			lsa6.sin6_family = AF_INET6;
4196 			lsa6.sin6_addr = ip6->ip6_src;
4197 			lsa6.sin6_port = sh->src_port;
4198 			bzero(&fsa6, sizeof(fsa6));
4199 			fsa6.sin6_len = sizeof(fsa6);
4200 			fsa6.sin6_family = AF_INET6;
4201 			fsa6.sin6_addr = ip6->ip6_dst;
4202 			fsa6.sin6_port = sh->dest_port;
4203 			SCTP_PRINTF("src: ");
4204 			sctp_print_address((struct sockaddr *)&lsa6);
4205 			SCTP_PRINTF("dest: ");
4206 			sctp_print_address((struct sockaddr *)&fsa6);
4207 			break;
4208 		}
4209 #endif
4210 	default:
4211 		/* TSNH */
4212 		break;
4213 	}
4214 }
4215 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 *
	 * Used by peeloff/accept-style operations: every queued-to-read
	 * control whose stcb matches is unhooked from old_inp's read
	 * queue (releasing the old socket's receive-buffer accounting
	 * mbuf by mbuf) and appended to new_inp's read queue (charging
	 * the new socket's accounting the same way).  'waitflags' is
	 * passed to sblock() when locking the old receive buffer.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for out target stcb */
	while (control) {
		/* grab next before unlinking the current entry */
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* release the old socket's sb accounting per mbuf */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge the new socket's sb accounting per mbuf */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4297 
4298 void
4299 sctp_add_to_readq(struct sctp_inpcb *inp,
4300     struct sctp_tcb *stcb,
4301     struct sctp_queued_to_read *control,
4302     struct sockbuf *sb,
4303     int end,
4304     int so_locked
4305 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4306     SCTP_UNUSED
4307 #endif
4308 )
4309 {
4310 	/*
4311 	 * Here we must place the control on the end of the socket read
4312 	 * queue AND increment sb_cc so that select will work properly on
4313 	 * read.
4314 	 */
4315 	struct mbuf *m, *prev = NULL;
4316 
4317 	if (inp == NULL) {
4318 		/* Gak, TSNH!! */
4319 #ifdef INVARIANTS
4320 		panic("Gak, inp NULL on add_to_readq");
4321 #endif
4322 		return;
4323 	}
4324 	SCTP_INP_READ_LOCK(inp);
4325 	if (!(control->spec_flags & M_NOTIFICATION)) {
4326 		atomic_add_int(&inp->total_recvs, 1);
4327 		if (!control->do_not_ref_stcb) {
4328 			atomic_add_int(&stcb->total_recvs, 1);
4329 		}
4330 	}
4331 	m = control->data;
4332 	control->held_length = 0;
4333 	control->length = 0;
4334 	while (m) {
4335 		if (SCTP_BUF_LEN(m) == 0) {
4336 			/* Skip mbufs with NO length */
4337 			if (prev == NULL) {
4338 				/* First one */
4339 				control->data = sctp_m_free(m);
4340 				m = control->data;
4341 			} else {
4342 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4343 				m = SCTP_BUF_NEXT(prev);
4344 			}
4345 			if (m == NULL) {
4346 				control->tail_mbuf = prev;;
4347 			}
4348 			continue;
4349 		}
4350 		prev = m;
4351 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4352 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4353 		}
4354 		sctp_sballoc(stcb, sb, m);
4355 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4356 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4357 		}
4358 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4359 		m = SCTP_BUF_NEXT(m);
4360 	}
4361 	if (prev != NULL) {
4362 		control->tail_mbuf = prev;
4363 	} else {
4364 		/* Everything got collapsed out?? */
4365 		return;
4366 	}
4367 	if (end) {
4368 		control->end_added = 1;
4369 	}
4370 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4371 	SCTP_INP_READ_UNLOCK(inp);
4372 	if (inp && inp->sctp_socket) {
4373 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4374 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4375 		} else {
4376 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4377 			struct socket *so;
4378 
4379 			so = SCTP_INP_SO(inp);
4380 			if (!so_locked) {
4381 				atomic_add_int(&stcb->asoc.refcnt, 1);
4382 				SCTP_TCB_UNLOCK(stcb);
4383 				SCTP_SOCKET_LOCK(so, 1);
4384 				SCTP_TCB_LOCK(stcb);
4385 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4386 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4387 					SCTP_SOCKET_UNLOCK(so, 1);
4388 					return;
4389 				}
4390 			}
4391 #endif
4392 			sctp_sorwakeup(inp, inp->sctp_socket);
4393 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4394 			if (!so_locked) {
4395 				SCTP_SOCKET_UNLOCK(so, 1);
4396 			}
4397 #endif
4398 		}
4399 	}
4400 }
4401 
4402 
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 *
	 * Returns 0 on success, -1 when there is no control, the control
	 * is already complete, or no data was supplied.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common error exit: release the read lock and fail */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			/* PDAPI case: charge the socket receive buffer */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4544 
4545 
4546 
4547 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4548  *************ALTERNATE ROUTING CODE
4549  */
4550 
4551 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4552  *************ALTERNATE ROUTING CODE
4553  */
4554 
4555 struct mbuf *
4556 sctp_generate_invmanparam(int err)
4557 {
4558 	/* Return a MBUF with a invalid mandatory parameter */
4559 	struct mbuf *m;
4560 
4561 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4562 	if (m) {
4563 		struct sctp_paramhdr *ph;
4564 
4565 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4566 		ph = mtod(m, struct sctp_paramhdr *);
4567 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4568 		ph->param_type = htons(err);
4569 	}
4570 	return (m);
4571 }
4572 
4573 #ifdef SCTP_MBCNT_LOGGING
4574 void
4575 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4576     struct sctp_tmit_chunk *tp1, int chk_cnt)
4577 {
4578 	if (tp1->data == NULL) {
4579 		return;
4580 	}
4581 	asoc->chunks_on_out_queue -= chk_cnt;
4582 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4583 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4584 		    asoc->total_output_queue_size,
4585 		    tp1->book_size,
4586 		    0,
4587 		    tp1->mbcnt);
4588 	}
4589 	if (asoc->total_output_queue_size >= tp1->book_size) {
4590 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4591 	} else {
4592 		asoc->total_output_queue_size = 0;
4593 	}
4594 
4595 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4596 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4597 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4598 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4599 		} else {
4600 			stcb->sctp_socket->so_snd.sb_cc = 0;
4601 
4602 		}
4603 	}
4604 }
4605 
4606 #endif
4607 
4608 int
4609 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4610     int reason, int so_locked
4611 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4612     SCTP_UNUSED
4613 #endif
4614 )
4615 {
4616 	struct sctp_stream_out *strq;
4617 	struct sctp_tmit_chunk *chk = NULL;
4618 	struct sctp_stream_queue_pending *sp;
4619 	uint16_t stream = 0, seq = 0;
4620 	uint8_t foundeom = 0;
4621 	int ret_sz = 0;
4622 	int notdone;
4623 	int do_wakeup_routine = 0;
4624 
4625 	stream = tp1->rec.data.stream_number;
4626 	seq = tp1->rec.data.stream_seq;
4627 	do {
4628 		ret_sz += tp1->book_size;
4629 		if (tp1->data != NULL) {
4630 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4631 				sctp_flight_size_decrease(tp1);
4632 				sctp_total_flight_decrease(stcb, tp1);
4633 			}
4634 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4635 			stcb->asoc.peers_rwnd += tp1->send_size;
4636 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4637 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4638 			sctp_m_freem(tp1->data);
4639 			tp1->data = NULL;
4640 			do_wakeup_routine = 1;
4641 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4642 				stcb->asoc.sent_queue_cnt_removeable--;
4643 			}
4644 		}
4645 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4646 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4647 		    SCTP_DATA_NOT_FRAG) {
4648 			/* not frag'ed we ae done   */
4649 			notdone = 0;
4650 			foundeom = 1;
4651 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4652 			/* end of frag, we are done */
4653 			notdone = 0;
4654 			foundeom = 1;
4655 		} else {
4656 			/*
4657 			 * Its a begin or middle piece, we must mark all of
4658 			 * it
4659 			 */
4660 			notdone = 1;
4661 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4662 		}
4663 	} while (tp1 && notdone);
4664 	if (foundeom == 0) {
4665 		/*
4666 		 * The multi-part message was scattered across the send and
4667 		 * sent queue.
4668 		 */
4669 next_on_sent:
4670 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4671 		/*
4672 		 * recurse throught the send_queue too, starting at the
4673 		 * beginning.
4674 		 */
4675 		if ((tp1) &&
4676 		    (tp1->rec.data.stream_number == stream) &&
4677 		    (tp1->rec.data.stream_seq == seq)
4678 		    ) {
4679 			/*
4680 			 * save to chk in case we have some on stream out
4681 			 * queue. If so and we have an un-transmitted one we
4682 			 * don't have to fudge the TSN.
4683 			 */
4684 			chk = tp1;
4685 			ret_sz += tp1->book_size;
4686 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4687 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4688 			sctp_m_freem(tp1->data);
4689 			/* No flight involved here book the size to 0 */
4690 			tp1->book_size = 0;
4691 			tp1->data = NULL;
4692 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4693 				foundeom = 1;
4694 			}
4695 			do_wakeup_routine = 1;
4696 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4697 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4698 			/*
4699 			 * on to the sent queue so we can wait for it to be
4700 			 * passed by.
4701 			 */
4702 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4703 			    sctp_next);
4704 			stcb->asoc.send_queue_cnt--;
4705 			stcb->asoc.sent_queue_cnt++;
4706 			goto next_on_sent;
4707 		}
4708 	}
4709 	if (foundeom == 0) {
4710 		/*
4711 		 * Still no eom found. That means there is stuff left on the
4712 		 * stream out queue.. yuck.
4713 		 */
4714 		strq = &stcb->asoc.strmout[stream];
4715 		SCTP_TCB_SEND_LOCK(stcb);
4716 		sp = TAILQ_FIRST(&strq->outqueue);
4717 		while (sp->strseq <= seq) {
4718 			/* Check if its our SEQ */
4719 			if (sp->strseq == seq) {
4720 				sp->discard_rest = 1;
4721 				/*
4722 				 * We may need to put a chunk on the queue
4723 				 * that holds the TSN that would have been
4724 				 * sent with the LAST bit.
4725 				 */
4726 				if (chk == NULL) {
4727 					/* Yep, we have to */
4728 					sctp_alloc_a_chunk(stcb, chk);
4729 					if (chk == NULL) {
4730 						/*
4731 						 * we are hosed. All we can
4732 						 * do is nothing.. which
4733 						 * will cause an abort if
4734 						 * the peer is paying
4735 						 * attention.
4736 						 */
4737 						goto oh_well;
4738 					}
4739 					memset(chk, 0, sizeof(*chk));
4740 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4741 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4742 					chk->asoc = &stcb->asoc;
4743 					chk->rec.data.stream_seq = sp->strseq;
4744 					chk->rec.data.stream_number = sp->stream;
4745 					chk->rec.data.payloadtype = sp->ppid;
4746 					chk->rec.data.context = sp->context;
4747 					chk->flags = sp->act_flags;
4748 					chk->addr_over = sp->addr_over;
4749 					chk->whoTo = sp->net;
4750 					atomic_add_int(&chk->whoTo->ref_count, 1);
4751 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4752 					stcb->asoc.pr_sctp_cnt++;
4753 					chk->pr_sctp_on = 1;
4754 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4755 					stcb->asoc.sent_queue_cnt++;
4756 					stcb->asoc.pr_sctp_cnt++;
4757 				} else {
4758 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4759 				}
4760 		oh_well:
4761 				if (sp->data) {
4762 					/*
4763 					 * Pull any data to free up the SB
4764 					 * and allow sender to "add more"
4765 					 * whilc we will throw away :-)
4766 					 */
4767 					sctp_free_spbufspace(stcb, &stcb->asoc,
4768 					    sp);
4769 					ret_sz += sp->length;
4770 					do_wakeup_routine = 1;
4771 					sp->some_taken = 1;
4772 					sctp_m_freem(sp->data);
4773 					sp->length = 0;
4774 					sp->data = NULL;
4775 					sp->tail_mbuf = NULL;
4776 				}
4777 				break;
4778 			} else {
4779 				/* Next one please */
4780 				sp = TAILQ_NEXT(sp, next);
4781 			}
4782 		}		/* End while */
4783 		SCTP_TCB_SEND_UNLOCK(stcb);
4784 	}
4785 	if (do_wakeup_routine) {
4786 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4787 		struct socket *so;
4788 
4789 		so = SCTP_INP_SO(stcb->sctp_ep);
4790 		if (!so_locked) {
4791 			atomic_add_int(&stcb->asoc.refcnt, 1);
4792 			SCTP_TCB_UNLOCK(stcb);
4793 			SCTP_SOCKET_LOCK(so, 1);
4794 			SCTP_TCB_LOCK(stcb);
4795 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4796 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4797 				/* assoc was freed while we were unlocked */
4798 				SCTP_SOCKET_UNLOCK(so, 1);
4799 				return (ret_sz);
4800 			}
4801 		}
4802 #endif
4803 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4804 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4805 		if (!so_locked) {
4806 			SCTP_SOCKET_UNLOCK(so, 1);
4807 		}
4808 #endif
4809 	}
4810 	return (ret_sz);
4811 }
4812 
4813 /*
4814  * checks to see if the given address, sa, is one that is currently known by
4815  * the kernel note: can't distinguish the same address on multiple interfaces
4816  * and doesn't handle multiple addresses with different zone/scope id's note:
4817  * ifa_ifwithaddr() compares the entire sockaddr struct
4818  */
4819 struct sctp_ifa *
4820 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4821     int holds_lock)
4822 {
4823 	struct sctp_laddr *laddr;
4824 
4825 	if (holds_lock == 0) {
4826 		SCTP_INP_RLOCK(inp);
4827 	}
4828 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4829 		if (laddr->ifa == NULL)
4830 			continue;
4831 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4832 			continue;
4833 		if (addr->sa_family == AF_INET) {
4834 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4835 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4836 				/* found him. */
4837 				if (holds_lock == 0) {
4838 					SCTP_INP_RUNLOCK(inp);
4839 				}
4840 				return (laddr->ifa);
4841 				break;
4842 			}
4843 		}
4844 #ifdef INET6
4845 		if (addr->sa_family == AF_INET6) {
4846 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4847 			    &laddr->ifa->address.sin6)) {
4848 				/* found him. */
4849 				if (holds_lock == 0) {
4850 					SCTP_INP_RUNLOCK(inp);
4851 				}
4852 				return (laddr->ifa);
4853 				break;
4854 			}
4855 		}
4856 #endif
4857 	}
4858 	if (holds_lock == 0) {
4859 		SCTP_INP_RUNLOCK(inp);
4860 	}
4861 	return (NULL);
4862 }
4863 
4864 uint32_t
4865 sctp_get_ifa_hash_val(struct sockaddr *addr)
4866 {
4867 	if (addr->sa_family == AF_INET) {
4868 		struct sockaddr_in *sin;
4869 
4870 		sin = (struct sockaddr_in *)addr;
4871 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4872 	} else if (addr->sa_family == AF_INET6) {
4873 		struct sockaddr_in6 *sin6;
4874 		uint32_t hash_of_addr;
4875 
4876 		sin6 = (struct sockaddr_in6 *)addr;
4877 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4878 		    sin6->sin6_addr.s6_addr32[1] +
4879 		    sin6->sin6_addr.s6_addr32[2] +
4880 		    sin6->sin6_addr.s6_addr32[3]);
4881 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4882 		return (hash_of_addr);
4883 	}
4884 	return (0);
4885 }
4886 
struct sctp_ifa *
sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
{
	/*
	 * Look up 'addr' in the given VRF's global address hash table.
	 * Returns the matching sctp_ifa, or NULL when the VRF is unknown
	 * or the address is not present.  When holds_lock is 0 the global
	 * address read lock is taken and released here.
	 */
	struct sctp_ifa *sctp_ifap;
	struct sctp_vrf *vrf;
	struct sctp_ifalist *hash_head;
	uint32_t hash_of_addr;

	if (holds_lock == 0)
		SCTP_IPI_ADDR_RLOCK();

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		/* unknown VRF: nothing to search */
stage_right:
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();
		return (NULL);
	}
	hash_of_addr = sctp_get_ifa_hash_val(addr);

	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
	if (hash_head == NULL) {
		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
		sctp_print_address(addr);
		SCTP_PRINTF("No such bucket for address\n");
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();

		return (NULL);
	}
	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
		if (sctp_ifap == NULL) {
			/* defensive: a NULL element means list corruption */
#ifdef INVARIANTS
			panic("Huh LIST_FOREACH corrupt");
			goto stage_right;
#else
			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
			goto stage_right;
#endif
		}
		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
			continue;
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    sctp_ifap->address.sin.sin_addr.s_addr) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				break;	/* NOTREACHED */
			}
		}
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &sctp_ifap->address.sin6)) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				break;	/* NOTREACHED */
			}
		}
#endif
	}
	if (holds_lock == 0)
		SCTP_IPI_ADDR_RUNLOCK();
	return (NULL);
}
4958 
/*
 * Called after the user has pulled data from the receive path.  Decides
 * whether the receive window has re-opened enough (delta vs. the last
 * reported rwnd >= rwnd_req) to warrant sending a window-update SACK
 * right away; otherwise the freed byte count is simply parked on the tcb
 * (freed_by_sorcv_sincelast) for a later call.
 *
 * 'freed_so_far' is in/out: its value is folded into the tcb's running
 * total and then zeroed.  'hold_rlock' tells us the caller holds the inp
 * READ lock, which must be dropped around the SACK send and re-taken
 * before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the association cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Pin the endpoint as well while we look at its socket. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed count into the tcb's running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		/* Window shrank; nothing to advertise. */
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Worth a window-update SACK.  Drop the caller's read lock
		 * (re-taken at 'out:') before taking the TCB lock to keep
		 * lock ordering sane.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: free may have raced us. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		/*
		 * EY if nr_sacks used then send an nr-sack , a sack
		 * otherwise
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
			sctp_send_nr_sack(stcb);
		else
			sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Restore the caller's read lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Release the association reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5047 
5048 int
5049 sctp_sorecvmsg(struct socket *so,
5050     struct uio *uio,
5051     struct mbuf **mp,
5052     struct sockaddr *from,
5053     int fromlen,
5054     int *msg_flags,
5055     struct sctp_sndrcvinfo *sinfo,
5056     int filling_sinfo)
5057 {
5058 	/*
5059 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5060 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5061 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5062 	 * On the way out we may send out any combination of:
5063 	 * MSG_NOTIFICATION MSG_EOR
5064 	 *
5065 	 */
5066 	struct sctp_inpcb *inp = NULL;
5067 	int my_len = 0;
5068 	int cp_len = 0, error = 0;
5069 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5070 	struct mbuf *m = NULL, *embuf = NULL;
5071 	struct sctp_tcb *stcb = NULL;
5072 	int wakeup_read_socket = 0;
5073 	int freecnt_applied = 0;
5074 	int out_flags = 0, in_flags = 0;
5075 	int block_allowed = 1;
5076 	uint32_t freed_so_far = 0;
5077 	uint32_t copied_so_far = 0;
5078 	int in_eeor_mode = 0;
5079 	int no_rcv_needed = 0;
5080 	uint32_t rwnd_req = 0;
5081 	int hold_sblock = 0;
5082 	int hold_rlock = 0;
5083 	int slen = 0;
5084 	uint32_t held_length = 0;
5085 	int sockbuf_lock = 0;
5086 
5087 	if (uio == NULL) {
5088 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5089 		return (EINVAL);
5090 	}
5091 	if (msg_flags) {
5092 		in_flags = *msg_flags;
5093 		if (in_flags & MSG_PEEK)
5094 			SCTP_STAT_INCR(sctps_read_peeks);
5095 	} else {
5096 		in_flags = 0;
5097 	}
5098 	slen = uio->uio_resid;
5099 
5100 	/* Pull in and set up our int flags */
5101 	if (in_flags & MSG_OOB) {
5102 		/* Out of band's NOT supported */
5103 		return (EOPNOTSUPP);
5104 	}
5105 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5106 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5107 		return (EINVAL);
5108 	}
5109 	if ((in_flags & (MSG_DONTWAIT
5110 	    | MSG_NBIO
5111 	    )) ||
5112 	    SCTP_SO_IS_NBIO(so)) {
5113 		block_allowed = 0;
5114 	}
5115 	/* setup the endpoint */
5116 	inp = (struct sctp_inpcb *)so->so_pcb;
5117 	if (inp == NULL) {
5118 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5119 		return (EFAULT);
5120 	}
5121 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5122 	/* Must be at least a MTU's worth */
5123 	if (rwnd_req < SCTP_MIN_RWND)
5124 		rwnd_req = SCTP_MIN_RWND;
5125 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5126 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5127 		sctp_misc_ints(SCTP_SORECV_ENTER,
5128 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5129 	}
5130 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5131 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5132 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5133 	}
5134 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5135 	sockbuf_lock = 1;
5136 	if (error) {
5137 		goto release_unlocked;
5138 	}
5139 restart:
5140 
5141 
5142 restart_nosblocks:
5143 	if (hold_sblock == 0) {
5144 		SOCKBUF_LOCK(&so->so_rcv);
5145 		hold_sblock = 1;
5146 	}
5147 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5148 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5149 		goto out;
5150 	}
5151 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5152 		if (so->so_error) {
5153 			error = so->so_error;
5154 			if ((in_flags & MSG_PEEK) == 0)
5155 				so->so_error = 0;
5156 			goto out;
5157 		} else {
5158 			if (so->so_rcv.sb_cc == 0) {
5159 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5160 				/* indicate EOF */
5161 				error = 0;
5162 				goto out;
5163 			}
5164 		}
5165 	}
5166 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5167 		/* we need to wait for data */
5168 		if ((so->so_rcv.sb_cc == 0) &&
5169 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5170 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5171 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5172 				/*
5173 				 * For active open side clear flags for
5174 				 * re-use passive open is blocked by
5175 				 * connect.
5176 				 */
5177 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5178 					/*
5179 					 * You were aborted, passive side
5180 					 * always hits here
5181 					 */
5182 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5183 					error = ECONNRESET;
5184 					/*
5185 					 * You get this once if you are
5186 					 * active open side
5187 					 */
5188 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5189 						/*
5190 						 * Remove flag if on the
5191 						 * active open side
5192 						 */
5193 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5194 					}
5195 				}
5196 				so->so_state &= ~(SS_ISCONNECTING |
5197 				    SS_ISDISCONNECTING |
5198 				    SS_ISCONFIRMING |
5199 				    SS_ISCONNECTED);
5200 				if (error == 0) {
5201 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5202 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5203 						error = ENOTCONN;
5204 					} else {
5205 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5206 					}
5207 				}
5208 				goto out;
5209 			}
5210 		}
5211 		error = sbwait(&so->so_rcv);
5212 		if (error) {
5213 			goto out;
5214 		}
5215 		held_length = 0;
5216 		goto restart_nosblocks;
5217 	} else if (so->so_rcv.sb_cc == 0) {
5218 		if (so->so_error) {
5219 			error = so->so_error;
5220 			if ((in_flags & MSG_PEEK) == 0)
5221 				so->so_error = 0;
5222 		} else {
5223 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5224 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5225 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5226 					/*
5227 					 * For active open side clear flags
5228 					 * for re-use passive open is
5229 					 * blocked by connect.
5230 					 */
5231 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5232 						/*
5233 						 * You were aborted, passive
5234 						 * side always hits here
5235 						 */
5236 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5237 						error = ECONNRESET;
5238 						/*
5239 						 * You get this once if you
5240 						 * are active open side
5241 						 */
5242 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5243 							/*
5244 							 * Remove flag if on
5245 							 * the active open
5246 							 * side
5247 							 */
5248 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5249 						}
5250 					}
5251 					so->so_state &= ~(SS_ISCONNECTING |
5252 					    SS_ISDISCONNECTING |
5253 					    SS_ISCONFIRMING |
5254 					    SS_ISCONNECTED);
5255 					if (error == 0) {
5256 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5257 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5258 							error = ENOTCONN;
5259 						} else {
5260 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5261 						}
5262 					}
5263 					goto out;
5264 				}
5265 			}
5266 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5267 			error = EWOULDBLOCK;
5268 		}
5269 		goto out;
5270 	}
5271 	if (hold_sblock == 1) {
5272 		SOCKBUF_UNLOCK(&so->so_rcv);
5273 		hold_sblock = 0;
5274 	}
5275 	/* we possibly have data we can read */
5276 	/* sa_ignore FREED_MEMORY */
5277 	control = TAILQ_FIRST(&inp->read_queue);
5278 	if (control == NULL) {
5279 		/*
5280 		 * This could be happening since the appender did the
5281 		 * increment but as not yet did the tailq insert onto the
5282 		 * read_queue
5283 		 */
5284 		if (hold_rlock == 0) {
5285 			SCTP_INP_READ_LOCK(inp);
5286 			hold_rlock = 1;
5287 		}
5288 		control = TAILQ_FIRST(&inp->read_queue);
5289 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5290 #ifdef INVARIANTS
5291 			panic("Huh, its non zero and nothing on control?");
5292 #endif
5293 			so->so_rcv.sb_cc = 0;
5294 		}
5295 		SCTP_INP_READ_UNLOCK(inp);
5296 		hold_rlock = 0;
5297 		goto restart;
5298 	}
5299 	if ((control->length == 0) &&
5300 	    (control->do_not_ref_stcb)) {
5301 		/*
5302 		 * Clean up code for freeing assoc that left behind a
5303 		 * pdapi.. maybe a peer in EEOR that just closed after
5304 		 * sending and never indicated a EOR.
5305 		 */
5306 		if (hold_rlock == 0) {
5307 			hold_rlock = 1;
5308 			SCTP_INP_READ_LOCK(inp);
5309 		}
5310 		control->held_length = 0;
5311 		if (control->data) {
5312 			/* Hmm there is data here .. fix */
5313 			struct mbuf *m_tmp;
5314 			int cnt = 0;
5315 
5316 			m_tmp = control->data;
5317 			while (m_tmp) {
5318 				cnt += SCTP_BUF_LEN(m_tmp);
5319 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5320 					control->tail_mbuf = m_tmp;
5321 					control->end_added = 1;
5322 				}
5323 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5324 			}
5325 			control->length = cnt;
5326 		} else {
5327 			/* remove it */
5328 			TAILQ_REMOVE(&inp->read_queue, control, next);
5329 			/* Add back any hiddend data */
5330 			sctp_free_remote_addr(control->whoFrom);
5331 			sctp_free_a_readq(stcb, control);
5332 		}
5333 		if (hold_rlock) {
5334 			hold_rlock = 0;
5335 			SCTP_INP_READ_UNLOCK(inp);
5336 		}
5337 		goto restart;
5338 	}
5339 	if (control->length == 0) {
5340 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5341 		    (filling_sinfo)) {
5342 			/* find a more suitable one then this */
5343 			ctl = TAILQ_NEXT(control, next);
5344 			while (ctl) {
5345 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5346 				    (ctl->some_taken ||
5347 				    (ctl->spec_flags & M_NOTIFICATION) ||
5348 				    ((ctl->do_not_ref_stcb == 0) &&
5349 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5350 				    ) {
5351 					/*-
5352 					 * If we have a different TCB next, and there is data
5353 					 * present. If we have already taken some (pdapi), OR we can
5354 					 * ref the tcb and no delivery as started on this stream, we
5355 					 * take it. Note we allow a notification on a different
5356 					 * assoc to be delivered..
5357 					 */
5358 					control = ctl;
5359 					goto found_one;
5360 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5361 					    (ctl->length) &&
5362 					    ((ctl->some_taken) ||
5363 					    ((ctl->do_not_ref_stcb == 0) &&
5364 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5365 					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5366 				    ) {
5367 					/*-
5368 					 * If we have the same tcb, and there is data present, and we
5369 					 * have the strm interleave feature present. Then if we have
5370 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5371 					 * not started a delivery for this stream, we can take it.
5372 					 * Note we do NOT allow a notificaiton on the same assoc to
5373 					 * be delivered.
5374 					 */
5375 					control = ctl;
5376 					goto found_one;
5377 				}
5378 				ctl = TAILQ_NEXT(ctl, next);
5379 			}
5380 		}
5381 		/*
5382 		 * if we reach here, not suitable replacement is available
5383 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5384 		 * into the our held count, and its time to sleep again.
5385 		 */
5386 		held_length = so->so_rcv.sb_cc;
5387 		control->held_length = so->so_rcv.sb_cc;
5388 		goto restart;
5389 	}
5390 	/* Clear the held length since there is something to read */
5391 	control->held_length = 0;
5392 	if (hold_rlock) {
5393 		SCTP_INP_READ_UNLOCK(inp);
5394 		hold_rlock = 0;
5395 	}
5396 found_one:
5397 	/*
5398 	 * If we reach here, control has a some data for us to read off.
5399 	 * Note that stcb COULD be NULL.
5400 	 */
5401 	control->some_taken++;
5402 	if (hold_sblock) {
5403 		SOCKBUF_UNLOCK(&so->so_rcv);
5404 		hold_sblock = 0;
5405 	}
5406 	stcb = control->stcb;
5407 	if (stcb) {
5408 		if ((control->do_not_ref_stcb == 0) &&
5409 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5410 			if (freecnt_applied == 0)
5411 				stcb = NULL;
5412 		} else if (control->do_not_ref_stcb == 0) {
5413 			/* you can't free it on me please */
5414 			/*
5415 			 * The lock on the socket buffer protects us so the
5416 			 * free code will stop. But since we used the
5417 			 * socketbuf lock and the sender uses the tcb_lock
5418 			 * to increment, we need to use the atomic add to
5419 			 * the refcnt
5420 			 */
5421 			if (freecnt_applied) {
5422 #ifdef INVARIANTS
5423 				panic("refcnt already incremented");
5424 #else
5425 				printf("refcnt already incremented?\n");
5426 #endif
5427 			} else {
5428 				atomic_add_int(&stcb->asoc.refcnt, 1);
5429 				freecnt_applied = 1;
5430 			}
5431 			/*
5432 			 * Setup to remember how much we have not yet told
5433 			 * the peer our rwnd has opened up. Note we grab the
5434 			 * value from the tcb from last time. Note too that
5435 			 * sack sending clears this when a sack is sent,
5436 			 * which is fine. Once we hit the rwnd_req, we then
5437 			 * will go to the sctp_user_rcvd() that will not
5438 			 * lock until it KNOWs it MUST send a WUP-SACK.
5439 			 */
5440 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5441 			stcb->freed_by_sorcv_sincelast = 0;
5442 		}
5443 	}
5444 	if (stcb &&
5445 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5446 	    control->do_not_ref_stcb == 0) {
5447 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5448 	}
5449 	/* First lets get off the sinfo and sockaddr info */
5450 	if ((sinfo) && filling_sinfo) {
5451 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5452 		nxt = TAILQ_NEXT(control, next);
5453 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5454 			struct sctp_extrcvinfo *s_extra;
5455 
5456 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5457 			if ((nxt) &&
5458 			    (nxt->length)) {
5459 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5460 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5461 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5462 				}
5463 				if (nxt->spec_flags & M_NOTIFICATION) {
5464 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5465 				}
5466 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5467 				s_extra->sreinfo_next_length = nxt->length;
5468 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5469 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5470 				if (nxt->tail_mbuf != NULL) {
5471 					if (nxt->end_added) {
5472 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5473 					}
5474 				}
5475 			} else {
5476 				/*
5477 				 * we explicitly 0 this, since the memcpy
5478 				 * got some other things beyond the older
5479 				 * sinfo_ that is on the control's structure
5480 				 * :-D
5481 				 */
5482 				nxt = NULL;
5483 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5484 				s_extra->sreinfo_next_aid = 0;
5485 				s_extra->sreinfo_next_length = 0;
5486 				s_extra->sreinfo_next_ppid = 0;
5487 				s_extra->sreinfo_next_stream = 0;
5488 			}
5489 		}
5490 		/*
5491 		 * update off the real current cum-ack, if we have an stcb.
5492 		 */
5493 		if ((control->do_not_ref_stcb == 0) && stcb)
5494 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5495 		/*
5496 		 * mask off the high bits, we keep the actual chunk bits in
5497 		 * there.
5498 		 */
5499 		sinfo->sinfo_flags &= 0x00ff;
5500 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5501 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5502 		}
5503 	}
5504 #ifdef SCTP_ASOCLOG_OF_TSNS
5505 	{
5506 		int index, newindex;
5507 		struct sctp_pcbtsn_rlog *entry;
5508 
5509 		do {
5510 			index = inp->readlog_index;
5511 			newindex = index + 1;
5512 			if (newindex >= SCTP_READ_LOG_SIZE) {
5513 				newindex = 0;
5514 			}
5515 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5516 		entry = &inp->readlog[index];
5517 		entry->vtag = control->sinfo_assoc_id;
5518 		entry->strm = control->sinfo_stream;
5519 		entry->seq = control->sinfo_ssn;
5520 		entry->sz = control->length;
5521 		entry->flgs = control->sinfo_flags;
5522 	}
5523 #endif
5524 	if (fromlen && from) {
5525 		struct sockaddr *to;
5526 
5527 #ifdef INET
5528 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5529 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5530 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5531 #else
5532 		/* No AF_INET use AF_INET6 */
5533 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5534 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5535 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5536 #endif
5537 
5538 		to = from;
5539 #if defined(INET) && defined(INET6)
5540 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5541 		    (to->sa_family == AF_INET) &&
5542 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5543 			struct sockaddr_in *sin;
5544 			struct sockaddr_in6 sin6;
5545 
5546 			sin = (struct sockaddr_in *)to;
5547 			bzero(&sin6, sizeof(sin6));
5548 			sin6.sin6_family = AF_INET6;
5549 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5550 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5551 			bcopy(&sin->sin_addr,
5552 			    &sin6.sin6_addr.s6_addr32[3],
5553 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5554 			sin6.sin6_port = sin->sin_port;
5555 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5556 		}
5557 #endif
5558 #if defined(INET6)
5559 		{
5560 			struct sockaddr_in6 lsa6, *to6;
5561 
5562 			to6 = (struct sockaddr_in6 *)to;
5563 			sctp_recover_scope_mac(to6, (&lsa6));
5564 		}
5565 #endif
5566 	}
5567 	/* now copy out what data we can */
5568 	if (mp == NULL) {
5569 		/* copy out each mbuf in the chain up to length */
5570 get_more_data:
5571 		m = control->data;
5572 		while (m) {
5573 			/* Move out all we can */
5574 			cp_len = (int)uio->uio_resid;
5575 			my_len = (int)SCTP_BUF_LEN(m);
5576 			if (cp_len > my_len) {
5577 				/* not enough in this buf */
5578 				cp_len = my_len;
5579 			}
5580 			if (hold_rlock) {
5581 				SCTP_INP_READ_UNLOCK(inp);
5582 				hold_rlock = 0;
5583 			}
5584 			if (cp_len > 0)
5585 				error = uiomove(mtod(m, char *), cp_len, uio);
5586 			/* re-read */
5587 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5588 				goto release;
5589 			}
5590 			if ((control->do_not_ref_stcb == 0) && stcb &&
5591 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5592 				no_rcv_needed = 1;
5593 			}
5594 			if (error) {
5595 				/* error we are out of here */
5596 				goto release;
5597 			}
5598 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5599 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5600 			    ((control->end_added == 0) ||
5601 			    (control->end_added &&
5602 			    (TAILQ_NEXT(control, next) == NULL)))
5603 			    ) {
5604 				SCTP_INP_READ_LOCK(inp);
5605 				hold_rlock = 1;
5606 			}
5607 			if (cp_len == SCTP_BUF_LEN(m)) {
5608 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5609 				    (control->end_added)) {
5610 					out_flags |= MSG_EOR;
5611 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5612 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5613 				}
5614 				if (control->spec_flags & M_NOTIFICATION) {
5615 					out_flags |= MSG_NOTIFICATION;
5616 				}
5617 				/* we ate up the mbuf */
5618 				if (in_flags & MSG_PEEK) {
5619 					/* just looking */
5620 					m = SCTP_BUF_NEXT(m);
5621 					copied_so_far += cp_len;
5622 				} else {
5623 					/* dispose of the mbuf */
5624 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5625 						sctp_sblog(&so->so_rcv,
5626 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5627 					}
5628 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5629 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5630 						sctp_sblog(&so->so_rcv,
5631 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5632 					}
5633 					embuf = m;
5634 					copied_so_far += cp_len;
5635 					freed_so_far += cp_len;
5636 					freed_so_far += MSIZE;
5637 					atomic_subtract_int(&control->length, cp_len);
5638 					control->data = sctp_m_free(m);
5639 					m = control->data;
5640 					/*
5641 					 * been through it all, must hold sb
5642 					 * lock ok to null tail
5643 					 */
5644 					if (control->data == NULL) {
5645 #ifdef INVARIANTS
5646 						if ((control->end_added == 0) ||
5647 						    (TAILQ_NEXT(control, next) == NULL)) {
5648 							/*
5649 							 * If the end is not
5650 							 * added, OR the
5651 							 * next is NOT null
5652 							 * we MUST have the
5653 							 * lock.
5654 							 */
5655 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5656 								panic("Hmm we don't own the lock?");
5657 							}
5658 						}
5659 #endif
5660 						control->tail_mbuf = NULL;
5661 #ifdef INVARIANTS
5662 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5663 							panic("end_added, nothing left and no MSG_EOR");
5664 						}
5665 #endif
5666 					}
5667 				}
5668 			} else {
5669 				/* Do we need to trim the mbuf? */
5670 				if (control->spec_flags & M_NOTIFICATION) {
5671 					out_flags |= MSG_NOTIFICATION;
5672 				}
5673 				if ((in_flags & MSG_PEEK) == 0) {
5674 					SCTP_BUF_RESV_UF(m, cp_len);
5675 					SCTP_BUF_LEN(m) -= cp_len;
5676 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5677 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5678 					}
5679 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5680 					if ((control->do_not_ref_stcb == 0) &&
5681 					    stcb) {
5682 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5683 					}
5684 					copied_so_far += cp_len;
5685 					embuf = m;
5686 					freed_so_far += cp_len;
5687 					freed_so_far += MSIZE;
5688 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5689 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5690 						    SCTP_LOG_SBRESULT, 0);
5691 					}
5692 					atomic_subtract_int(&control->length, cp_len);
5693 				} else {
5694 					copied_so_far += cp_len;
5695 				}
5696 			}
5697 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5698 				break;
5699 			}
5700 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5701 			    (control->do_not_ref_stcb == 0) &&
5702 			    (freed_so_far >= rwnd_req)) {
5703 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5704 			}
5705 		}		/* end while(m) */
5706 		/*
5707 		 * At this point we have looked at it all and we either have
5708 		 * a MSG_EOR/or read all the user wants... <OR>
5709 		 * control->length == 0.
5710 		 */
5711 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5712 			/* we are done with this control */
5713 			if (control->length == 0) {
5714 				if (control->data) {
5715 #ifdef INVARIANTS
5716 					panic("control->data not null at read eor?");
5717 #else
5718 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5719 					sctp_m_freem(control->data);
5720 					control->data = NULL;
5721 #endif
5722 				}
5723 		done_with_control:
5724 				if (TAILQ_NEXT(control, next) == NULL) {
5725 					/*
5726 					 * If we don't have a next we need a
5727 					 * lock, if there is a next
5728 					 * interrupt is filling ahead of us
5729 					 * and we don't need a lock to
5730 					 * remove this guy (which is the
5731 					 * head of the queue).
5732 					 */
5733 					if (hold_rlock == 0) {
5734 						SCTP_INP_READ_LOCK(inp);
5735 						hold_rlock = 1;
5736 					}
5737 				}
5738 				TAILQ_REMOVE(&inp->read_queue, control, next);
5739 				/* Add back any hiddend data */
5740 				if (control->held_length) {
5741 					held_length = 0;
5742 					control->held_length = 0;
5743 					wakeup_read_socket = 1;
5744 				}
5745 				if (control->aux_data) {
5746 					sctp_m_free(control->aux_data);
5747 					control->aux_data = NULL;
5748 				}
5749 				no_rcv_needed = control->do_not_ref_stcb;
5750 				sctp_free_remote_addr(control->whoFrom);
5751 				control->data = NULL;
5752 				sctp_free_a_readq(stcb, control);
5753 				control = NULL;
5754 				if ((freed_so_far >= rwnd_req) &&
5755 				    (no_rcv_needed == 0))
5756 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5757 
5758 			} else {
5759 				/*
5760 				 * The user did not read all of this
5761 				 * message, turn off the returned MSG_EOR
5762 				 * since we are leaving more behind on the
5763 				 * control to read.
5764 				 */
5765 #ifdef INVARIANTS
5766 				if (control->end_added &&
5767 				    (control->data == NULL) &&
5768 				    (control->tail_mbuf == NULL)) {
5769 					panic("Gak, control->length is corrupt?");
5770 				}
5771 #endif
5772 				no_rcv_needed = control->do_not_ref_stcb;
5773 				out_flags &= ~MSG_EOR;
5774 			}
5775 		}
5776 		if (out_flags & MSG_EOR) {
5777 			goto release;
5778 		}
5779 		if ((uio->uio_resid == 0) ||
5780 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5781 		    ) {
5782 			goto release;
5783 		}
5784 		/*
5785 		 * If I hit here the receiver wants more and this message is
5786 		 * NOT done (pd-api). So two questions. Can we block? if not
5787 		 * we are done. Did the user NOT set MSG_WAITALL?
5788 		 */
5789 		if (block_allowed == 0) {
5790 			goto release;
5791 		}
5792 		/*
5793 		 * We need to wait for more data a few things: - We don't
5794 		 * sbunlock() so we don't get someone else reading. - We
5795 		 * must be sure to account for the case where what is added
5796 		 * is NOT to our control when we wakeup.
5797 		 */
5798 
5799 		/*
5800 		 * Do we need to tell the transport a rwnd update might be
5801 		 * needed before we go to sleep?
5802 		 */
5803 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5804 		    ((freed_so_far >= rwnd_req) &&
5805 		    (control->do_not_ref_stcb == 0) &&
5806 		    (no_rcv_needed == 0))) {
5807 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5808 		}
5809 wait_some_more:
5810 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5811 			goto release;
5812 		}
5813 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5814 			goto release;
5815 
5816 		if (hold_rlock == 1) {
5817 			SCTP_INP_READ_UNLOCK(inp);
5818 			hold_rlock = 0;
5819 		}
5820 		if (hold_sblock == 0) {
5821 			SOCKBUF_LOCK(&so->so_rcv);
5822 			hold_sblock = 1;
5823 		}
5824 		if ((copied_so_far) && (control->length == 0) &&
5825 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
5826 		    ) {
5827 			goto release;
5828 		}
5829 		if (so->so_rcv.sb_cc <= control->held_length) {
5830 			error = sbwait(&so->so_rcv);
5831 			if (error) {
5832 				goto release;
5833 			}
5834 			control->held_length = 0;
5835 		}
5836 		if (hold_sblock) {
5837 			SOCKBUF_UNLOCK(&so->so_rcv);
5838 			hold_sblock = 0;
5839 		}
5840 		if (control->length == 0) {
5841 			/* still nothing here */
5842 			if (control->end_added == 1) {
5843 				/* he aborted, or is done i.e.did a shutdown */
5844 				out_flags |= MSG_EOR;
5845 				if (control->pdapi_aborted) {
5846 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5847 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5848 
5849 					out_flags |= MSG_TRUNC;
5850 				} else {
5851 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5852 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5853 				}
5854 				goto done_with_control;
5855 			}
5856 			if (so->so_rcv.sb_cc > held_length) {
5857 				control->held_length = so->so_rcv.sb_cc;
5858 				held_length = 0;
5859 			}
5860 			goto wait_some_more;
5861 		} else if (control->data == NULL) {
5862 			/*
5863 			 * we must re-sync since data is probably being
5864 			 * added
5865 			 */
5866 			SCTP_INP_READ_LOCK(inp);
5867 			if ((control->length > 0) && (control->data == NULL)) {
5868 				/*
5869 				 * big trouble.. we have the lock and its
5870 				 * corrupt?
5871 				 */
5872 #ifdef INVARIANTS
5873 				panic("Impossible data==NULL length !=0");
5874 #endif
5875 				out_flags |= MSG_EOR;
5876 				out_flags |= MSG_TRUNC;
5877 				control->length = 0;
5878 				SCTP_INP_READ_UNLOCK(inp);
5879 				goto done_with_control;
5880 			}
5881 			SCTP_INP_READ_UNLOCK(inp);
5882 			/* We will fall around to get more data */
5883 		}
5884 		goto get_more_data;
5885 	} else {
5886 		/*-
5887 		 * Give caller back the mbuf chain,
5888 		 * store in uio_resid the length
5889 		 */
5890 		wakeup_read_socket = 0;
5891 		if ((control->end_added == 0) ||
5892 		    (TAILQ_NEXT(control, next) == NULL)) {
5893 			/* Need to get rlock */
5894 			if (hold_rlock == 0) {
5895 				SCTP_INP_READ_LOCK(inp);
5896 				hold_rlock = 1;
5897 			}
5898 		}
5899 		if (control->end_added) {
5900 			out_flags |= MSG_EOR;
5901 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5902 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5903 		}
5904 		if (control->spec_flags & M_NOTIFICATION) {
5905 			out_flags |= MSG_NOTIFICATION;
5906 		}
5907 		uio->uio_resid = control->length;
5908 		*mp = control->data;
5909 		m = control->data;
5910 		while (m) {
5911 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5912 				sctp_sblog(&so->so_rcv,
5913 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5914 			}
5915 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5916 			freed_so_far += SCTP_BUF_LEN(m);
5917 			freed_so_far += MSIZE;
5918 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5919 				sctp_sblog(&so->so_rcv,
5920 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5921 			}
5922 			m = SCTP_BUF_NEXT(m);
5923 		}
5924 		control->data = control->tail_mbuf = NULL;
5925 		control->length = 0;
5926 		if (out_flags & MSG_EOR) {
5927 			/* Done with this control */
5928 			goto done_with_control;
5929 		}
5930 	}
5931 release:
5932 	if (hold_rlock == 1) {
5933 		SCTP_INP_READ_UNLOCK(inp);
5934 		hold_rlock = 0;
5935 	}
5936 	if (hold_sblock == 1) {
5937 		SOCKBUF_UNLOCK(&so->so_rcv);
5938 		hold_sblock = 0;
5939 	}
5940 	sbunlock(&so->so_rcv);
5941 	sockbuf_lock = 0;
5942 
5943 release_unlocked:
5944 	if (hold_sblock) {
5945 		SOCKBUF_UNLOCK(&so->so_rcv);
5946 		hold_sblock = 0;
5947 	}
5948 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5949 		if ((freed_so_far >= rwnd_req) &&
5950 		    (control && (control->do_not_ref_stcb == 0)) &&
5951 		    (no_rcv_needed == 0))
5952 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5953 	}
5954 out:
5955 	if (msg_flags) {
5956 		*msg_flags = out_flags;
5957 	}
5958 	if (((out_flags & MSG_EOR) == 0) &&
5959 	    ((in_flags & MSG_PEEK) == 0) &&
5960 	    (sinfo) &&
5961 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
5962 		struct sctp_extrcvinfo *s_extra;
5963 
5964 		s_extra = (struct sctp_extrcvinfo *)sinfo;
5965 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5966 	}
5967 	if (hold_rlock == 1) {
5968 		SCTP_INP_READ_UNLOCK(inp);
5969 		hold_rlock = 0;
5970 	}
5971 	if (hold_sblock) {
5972 		SOCKBUF_UNLOCK(&so->so_rcv);
5973 		hold_sblock = 0;
5974 	}
5975 	if (sockbuf_lock) {
5976 		sbunlock(&so->so_rcv);
5977 	}
5978 	if (freecnt_applied) {
5979 		/*
5980 		 * The lock on the socket buffer protects us so the free
5981 		 * code will stop. But since we used the socketbuf lock and
5982 		 * the sender uses the tcb_lock to increment, we need to use
5983 		 * the atomic add to the refcnt.
5984 		 */
5985 		if (stcb == NULL) {
5986 #ifdef INVARIANTS
5987 			panic("stcb for refcnt has gone NULL?");
5988 			goto stage_left;
5989 #else
5990 			goto stage_left;
5991 #endif
5992 		}
5993 		atomic_add_int(&stcb->asoc.refcnt, -1);
5994 		freecnt_applied = 0;
5995 		/* Save the value back for next time */
5996 		stcb->freed_by_sorcv_sincelast = freed_so_far;
5997 	}
5998 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5999 		if (stcb) {
6000 			sctp_misc_ints(SCTP_SORECV_DONE,
6001 			    freed_so_far,
6002 			    ((uio) ? (slen - uio->uio_resid) : slen),
6003 			    stcb->asoc.my_rwnd,
6004 			    so->so_rcv.sb_cc);
6005 		} else {
6006 			sctp_misc_ints(SCTP_SORECV_DONE,
6007 			    freed_so_far,
6008 			    ((uio) ? (slen - uio->uio_resid) : slen),
6009 			    0,
6010 			    so->so_rcv.sb_cc);
6011 		}
6012 	}
6013 stage_left:
6014 	if (wakeup_read_socket) {
6015 		sctp_sorwakeup(inp, so);
6016 	}
6017 	return (error);
6018 }
6019 
6020 
6021 #ifdef SCTP_MBUF_LOGGING
6022 struct mbuf *
6023 sctp_m_free(struct mbuf *m)
6024 {
6025 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6026 		if (SCTP_BUF_IS_EXTENDED(m)) {
6027 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6028 		}
6029 	}
6030 	return (m_free(m));
6031 }
6032 
6033 void
6034 sctp_m_freem(struct mbuf *mb)
6035 {
6036 	while (mb != NULL)
6037 		mb = sctp_m_free(mb);
6038 }
6039 
6040 #endif
6041 
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	/* The address must be known locally within the given VRF. */
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* hold a reference on the ifa while the request sits on the queue */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_IPI_ITERATOR_WQ_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	/* kick the address work-queue timer so the request gets processed */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
	return (0);
}
6088 
6089 
6090 int
6091 sctp_soreceive(struct socket *so,
6092     struct sockaddr **psa,
6093     struct uio *uio,
6094     struct mbuf **mp0,
6095     struct mbuf **controlp,
6096     int *flagsp)
6097 {
6098 	int error, fromlen;
6099 	uint8_t sockbuf[256];
6100 	struct sockaddr *from;
6101 	struct sctp_extrcvinfo sinfo;
6102 	int filling_sinfo = 1;
6103 	struct sctp_inpcb *inp;
6104 
6105 	inp = (struct sctp_inpcb *)so->so_pcb;
6106 	/* pickup the assoc we are reading from */
6107 	if (inp == NULL) {
6108 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6109 		return (EINVAL);
6110 	}
6111 	if ((sctp_is_feature_off(inp,
6112 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6113 	    (controlp == NULL)) {
6114 		/* user does not want the sndrcv ctl */
6115 		filling_sinfo = 0;
6116 	}
6117 	if (psa) {
6118 		from = (struct sockaddr *)sockbuf;
6119 		fromlen = sizeof(sockbuf);
6120 		from->sa_len = 0;
6121 	} else {
6122 		from = NULL;
6123 		fromlen = 0;
6124 	}
6125 
6126 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6127 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6128 	if ((controlp) && (filling_sinfo)) {
6129 		/* copy back the sinfo in a CMSG format */
6130 		if (filling_sinfo)
6131 			*controlp = sctp_build_ctl_nchunk(inp,
6132 			    (struct sctp_sndrcvinfo *)&sinfo);
6133 		else
6134 			*controlp = NULL;
6135 	}
6136 	if (psa) {
6137 		/* copy back the address info */
6138 		if (from && from->sa_len) {
6139 			*psa = sodupsockaddr(from, M_NOWAIT);
6140 		} else {
6141 			*psa = NULL;
6142 		}
6143 	}
6144 	return (error);
6145 }
6146 
6147 
6148 int
6149 sctp_l_soreceive(struct socket *so,
6150     struct sockaddr **name,
6151     struct uio *uio,
6152     char **controlp,
6153     int *controllen,
6154     int *flag)
6155 {
6156 	int error, fromlen;
6157 	uint8_t sockbuf[256];
6158 	struct sockaddr *from;
6159 	struct sctp_extrcvinfo sinfo;
6160 	int filling_sinfo = 1;
6161 	struct sctp_inpcb *inp;
6162 
6163 	inp = (struct sctp_inpcb *)so->so_pcb;
6164 	/* pickup the assoc we are reading from */
6165 	if (inp == NULL) {
6166 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6167 		return (EINVAL);
6168 	}
6169 	if ((sctp_is_feature_off(inp,
6170 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6171 	    (controlp == NULL)) {
6172 		/* user does not want the sndrcv ctl */
6173 		filling_sinfo = 0;
6174 	}
6175 	if (name) {
6176 		from = (struct sockaddr *)sockbuf;
6177 		fromlen = sizeof(sockbuf);
6178 		from->sa_len = 0;
6179 	} else {
6180 		from = NULL;
6181 		fromlen = 0;
6182 	}
6183 
6184 	error = sctp_sorecvmsg(so, uio,
6185 	    (struct mbuf **)NULL,
6186 	    from, fromlen, flag,
6187 	    (struct sctp_sndrcvinfo *)&sinfo,
6188 	    filling_sinfo);
6189 	if ((controlp) && (filling_sinfo)) {
6190 		/*
6191 		 * copy back the sinfo in a CMSG format note that the caller
6192 		 * has reponsibility for freeing the memory.
6193 		 */
6194 		if (filling_sinfo)
6195 			*controlp = sctp_build_ctl_cchunk(inp,
6196 			    controllen,
6197 			    (struct sctp_sndrcvinfo *)&sinfo);
6198 	}
6199 	if (name) {
6200 		/* copy back the address info */
6201 		if (from && from->sa_len) {
6202 			*name = sodupsockaddr(from, M_WAIT);
6203 		} else {
6204 			*name = NULL;
6205 		}
6206 	}
6207 	return (error);
6208 }
6209 
6210 
6211 
6212 
6213 
6214 
6215 
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add up to totaddr packed sockaddrs (starting at addr) as remote
	 * addresses of the association.  Returns the number of addresses
	 * actually added.  On failure the association has already been
	 * freed and *error is ENOBUFS -- the caller must not touch stcb
	 * in that case.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/*
		 * NOTE(review): an entry with any other sa_family is
		 * skipped, but `incr` keeps its previous value (0 on the
		 * first iteration), so the walk can re-examine the same
		 * sockaddr.  Callers appear expected to pass only
		 * AF_INET/AF_INET6 entries -- confirm against callers.
		 */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6256 
6257 struct sctp_tcb *
6258 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6259     int *totaddr, int *num_v4, int *num_v6, int *error,
6260     int limit, int *bad_addr)
6261 {
6262 	struct sockaddr *sa;
6263 	struct sctp_tcb *stcb = NULL;
6264 	size_t incr, at, i;
6265 
6266 	at = incr = 0;
6267 	sa = addr;
6268 	*error = *num_v6 = *num_v4 = 0;
6269 	/* account and validate addresses */
6270 	for (i = 0; i < (size_t)*totaddr; i++) {
6271 		if (sa->sa_family == AF_INET) {
6272 			(*num_v4) += 1;
6273 			incr = sizeof(struct sockaddr_in);
6274 			if (sa->sa_len != incr) {
6275 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6276 				*error = EINVAL;
6277 				*bad_addr = 1;
6278 				return (NULL);
6279 			}
6280 		} else if (sa->sa_family == AF_INET6) {
6281 			struct sockaddr_in6 *sin6;
6282 
6283 			sin6 = (struct sockaddr_in6 *)sa;
6284 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6285 				/* Must be non-mapped for connectx */
6286 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6287 				*error = EINVAL;
6288 				*bad_addr = 1;
6289 				return (NULL);
6290 			}
6291 			(*num_v6) += 1;
6292 			incr = sizeof(struct sockaddr_in6);
6293 			if (sa->sa_len != incr) {
6294 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6295 				*error = EINVAL;
6296 				*bad_addr = 1;
6297 				return (NULL);
6298 			}
6299 		} else {
6300 			*totaddr = i;
6301 			/* we are done */
6302 			break;
6303 		}
6304 		SCTP_INP_INCR_REF(inp);
6305 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6306 		if (stcb != NULL) {
6307 			/* Already have or am bring up an association */
6308 			return (stcb);
6309 		} else {
6310 			SCTP_INP_DECR_REF(inp);
6311 		}
6312 		if ((at + incr) > (size_t)limit) {
6313 			*totaddr = i;
6314 			break;
6315 		}
6316 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6317 	}
6318 	return ((struct sctp_tcb *)NULL);
6319 }
6320 
6321 /*
6322  * sctp_bindx(ADD) for one address.
6323  * assumes all arguments are valid/checked by caller.
6324  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* sanity-check the caller-supplied length */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/*
			 * Convert the v4-mapped v6 address to a plain
			 * IPv4 sockaddr; sin is on our stack and stays
			 * valid for the remainder of this call.
			 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* endpoint not yet bound at all: this is a plain bind() */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* is some other endpoint already bound to this address? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* zero the port so the add-path treats it as free */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6446 
6447 /*
6448  * sctp_bindx(DELETE) for one address.
6449  * assumes all arguments are valid/checked by caller.
6450  */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* sanity-check the caller-supplied length */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/*
			 * Convert the v4-mapped v6 address to a plain
			 * IPv4 sockaddr; sin is on our stack and stays
			 * valid for the remainder of this call.
			 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6529 
6530 /*
6531  * returns the valid local address count for an assoc, taking into account
6532  * all scoping rules
6533  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	ipv4_addr_legal = ipv6_addr_legal = 0;
	/* determine which address families this endpoint may use */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			/* a v6 socket without V6ONLY also speaks IPv4 */
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				/* skip addresses the assoc may not use */
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link-local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6665 
6666 #if defined(SCTP_LOCAL_TRACE_BUF)
6667 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/*
	 * Reserve a slot in the circular trace buffer without a lock:
	 * retry the compare-and-swap until we are the one who advanced
	 * the shared index.  The index wraps from SCTP_MAX_LOGGING_SIZE
	 * back to 1; a saved value of SCTP_MAX_LOGGING_SIZE maps to
	 * slot 0 below.
	 */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	/* fill in the reserved entry; writes to the slot are unsynchronized */
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6693 
6694 #endif
/* We will need to add support
 * to bind the ports and such here
 * so we can do UDP tunneling. In
 * the meantime, we return an error.
 */
6700 #include <netinet/udp.h>
6701 #include <netinet/udp_var.h>
6702 #include <sys/proc.h>
6703 #ifdef INET6
6704 #include <netinet6/sctp6_var.h>
6705 #endif
6706 
6707 static void
6708 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6709 {
6710 	struct ip *iph;
6711 	struct mbuf *sp, *last;
6712 	struct udphdr *uhdr;
6713 	uint16_t port = 0, len;
6714 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6715 
6716 	/*
6717 	 * Split out the mbuf chain. Leave the IP header in m, place the
6718 	 * rest in the sp.
6719 	 */
6720 	if ((m->m_flags & M_PKTHDR) == 0) {
6721 		/* Can't handle one that is not a pkt hdr */
6722 		goto out;
6723 	}
6724 	/* pull the src port */
6725 	iph = mtod(m, struct ip *);
6726 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6727 
6728 	port = uhdr->uh_sport;
6729 	sp = m_split(m, off, M_DONTWAIT);
6730 	if (sp == NULL) {
6731 		/* Gak, drop packet, we can't do a split */
6732 		goto out;
6733 	}
6734 	if (sp->m_pkthdr.len < header_size) {
6735 		/* Gak, packet can't have an SCTP header in it - to small */
6736 		m_freem(sp);
6737 		goto out;
6738 	}
6739 	/* ok now pull up the UDP header and SCTP header together */
6740 	sp = m_pullup(sp, header_size);
6741 	if (sp == NULL) {
6742 		/* Gak pullup failed */
6743 		goto out;
6744 	}
6745 	/* trim out the UDP header */
6746 	m_adj(sp, sizeof(struct udphdr));
6747 
6748 	/* Now reconstruct the mbuf chain */
6749 	/* 1) find last one */
6750 	last = m;
6751 	while (last->m_next != NULL) {
6752 		last = last->m_next;
6753 	}
6754 	last->m_next = sp;
6755 	m->m_pkthdr.len += sp->m_pkthdr.len;
6756 	last = m;
6757 	while (last != NULL) {
6758 		last = last->m_next;
6759 	}
6760 	/* Now its ready for sctp_input or sctp6_input */
6761 	iph = mtod(m, struct ip *);
6762 	switch (iph->ip_v) {
6763 	case IPVERSION:
6764 		{
6765 			/* its IPv4 */
6766 			len = SCTP_GET_IPV4_LENGTH(iph);
6767 			len -= sizeof(struct udphdr);
6768 			SCTP_GET_IPV4_LENGTH(iph) = len;
6769 			sctp_input_with_port(m, off, port);
6770 			break;
6771 		}
6772 #ifdef INET6
6773 	case IPV6_VERSION >> 4:
6774 		{
6775 			/* its IPv6 - NOT supported */
6776 			goto out;
6777 			break;
6778 
6779 		}
6780 #endif
6781 	default:
6782 		{
6783 			m_freem(m);
6784 			break;
6785 		}
6786 	}
6787 	return;
6788 out:
6789 	m_freem(m);
6790 }
6791 
6792 void
6793 sctp_over_udp_stop(void)
6794 {
6795 	struct socket *sop;
6796 
6797 	/*
6798 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6799 	 * for writting!
6800 	 */
6801 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6802 		/* Nothing to do */
6803 		return;
6804 	}
6805 	sop = SCTP_BASE_INFO(udp_tun_socket);
6806 	soclose(sop);
6807 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6808 }
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	/* create the tunneling UDP socket in the current thread's context */
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up we cant get the port */
exit_stage_left:
		/*
		 * Shared failure path (also entered by goto from the
		 * tunneling-hook failure above): sctp_over_udp_stop()
		 * closes the socket and clears udp_tun_socket.
		 */
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_upd_tunneled_packet().
	 */
	return (0);
}
6862