xref: /freebsd/sys/netinet/sctputil.c (revision 830940567b49bb0c08dfaed40418999e76616909)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_cc_functions.h>
51 
52 #define NUMBER_OF_MTU_SIZES 18
53 
54 
55 #if defined(__Windows__) && !defined(SCTP_LOCAL_TRACE_BUF)
56 #include "eventrace_netinet.h"
57 #include "sctputil.tmh"		/* this is the file that will be auto
58 				 * generated */
59 #else
60 #ifndef KTR_SCTP
61 #define KTR_SCTP KTR_SUBSYS
62 #endif
63 #endif
64 
/*
 * Log a socket-buffer accounting event to the KTR SCTP trace stream.
 * Records the socket buffer byte count, the association's sb byte count
 * (0 when no stcb is supplied) and the increment applied; the record is
 * emitted as the four 32-bit words of the log union (x.misc overlays x.sb).
 */
void
sctp_sblog(struct sockbuf *sb,
    struct sctp_tcb *stcb, int from, int incr)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
86 
/*
 * Log a PCB/association close event, tagged with a caller-supplied
 * location code ('loc') so the close path can be reconstructed from the
 * trace.  NOTE(review): 'inp' is dereferenced unconditionally while
 * 'stcb' may be NULL — callers must never pass a NULL inp; confirm.
 */
void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
110 
111 
/*
 * Log an RTT/RTO measurement event for the given destination ('net'),
 * recording the previously measured RTT.  The record is zeroed first so
 * unused union bytes are deterministic in the trace.
 */
void
rto_logging(struct sctp_nets *net, int from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->prev_rtt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
129 
/*
 * Log a stream-delivery event when only the "new" TSN/SSN pair is known;
 * the "existing" (e_tsn/e_sseq) fields are logged as zero.  Companion of
 * sctp_log_strm_del(), which logs both sides from queued chunks.
 */
void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
150 
/*
 * Log a Nagle-algorithm decision ('action') together with the current
 * flight and output-queue accounting of the association.  'stcb' must be
 * non-NULL: its asoc fields are read unconditionally.
 */
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
169 
170 
/*
 * Log a SACK-processing event: the old and new cumulative ack points,
 * the TSN being considered, and the number of gap-ack and duplicate
 * reports in the SACK.
 */
void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
189 
/*
 * Log the state of the mapping array: its base TSN, the cumulative-ack
 * point and the highest TSN seen.  The record is zeroed first so unused
 * union bytes are deterministic.
 */
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
207 
/*
 * Log a fast-retransmit event: the biggest TSN seen, the biggest newly
 * acked TSN, and the TSN under consideration.
 */
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
    int from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
227 
228 
/*
 * Log an mbuf-tracking event: the mbuf pointer, its flags, length and
 * data pointer, plus external-storage base and refcount when the mbuf
 * uses external storage (zero otherwise).
 */
void
sctp_log_mb(struct mbuf *m, int from)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
253 
254 
/*
 * Log a stream-delivery event from queued read chunks: 'control' (must be
 * non-NULL; a NULL is reported and ignored) supplies the new TSN/SSN and
 * stream, and the optional 'poschk' supplies the existing entry it is
 * being compared/positioned against (zeros when absent).
 */
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
    int from)
{
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
285 
286 void
287 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
288 {
289 	struct sctp_cwnd_log sctp_clog;
290 
291 	sctp_clog.x.cwnd.net = net;
292 	if (stcb->asoc.send_queue_cnt > 255)
293 		sctp_clog.x.cwnd.cnt_in_send = 255;
294 	else
295 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
296 	if (stcb->asoc.stream_queue_cnt > 255)
297 		sctp_clog.x.cwnd.cnt_in_str = 255;
298 	else
299 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
300 
301 	if (net) {
302 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
303 		sctp_clog.x.cwnd.inflight = net->flight_size;
304 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
305 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
306 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
307 	}
308 	if (SCTP_CWNDLOG_PRESEND == from) {
309 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
310 	}
311 	sctp_clog.x.cwnd.cwnd_augment = augment;
312 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
313 	    SCTP_LOG_EVENT_CWND,
314 	    from,
315 	    sctp_clog.x.misc.log1,
316 	    sctp_clog.x.misc.log2,
317 	    sctp_clog.x.misc.log3,
318 	    sctp_clog.x.misc.log4);
319 
320 }
321 
322 void
323 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
324 {
325 	struct sctp_cwnd_log sctp_clog;
326 
327 	memset(&sctp_clog, 0, sizeof(sctp_clog));
328 	if (inp) {
329 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
330 
331 	} else {
332 		sctp_clog.x.lock.sock = (void *)NULL;
333 	}
334 	sctp_clog.x.lock.inp = (void *)inp;
335 	if (stcb) {
336 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
337 	} else {
338 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
339 	}
340 	if (inp) {
341 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
342 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
343 	} else {
344 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
345 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
346 	}
347 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
348 	if (inp->sctp_socket) {
349 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
350 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
351 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
352 	} else {
353 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
354 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
355 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
356 	}
357 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
358 	    SCTP_LOG_LOCK_EVENT,
359 	    from,
360 	    sctp_clog.x.misc.log1,
361 	    sctp_clog.x.misc.log2,
362 	    sctp_clog.x.misc.log3,
363 	    sctp_clog.x.misc.log4);
364 
365 }
366 
367 void
368 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
369 {
370 	struct sctp_cwnd_log sctp_clog;
371 
372 	memset(&sctp_clog, 0, sizeof(sctp_clog));
373 	sctp_clog.x.cwnd.net = net;
374 	sctp_clog.x.cwnd.cwnd_new_value = error;
375 	sctp_clog.x.cwnd.inflight = net->flight_size;
376 	sctp_clog.x.cwnd.cwnd_augment = burst;
377 	if (stcb->asoc.send_queue_cnt > 255)
378 		sctp_clog.x.cwnd.cnt_in_send = 255;
379 	else
380 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
381 	if (stcb->asoc.stream_queue_cnt > 255)
382 		sctp_clog.x.cwnd.cnt_in_str = 255;
383 	else
384 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
385 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
386 	    SCTP_LOG_EVENT_MAXBURST,
387 	    from,
388 	    sctp_clog.x.misc.log1,
389 	    sctp_clog.x.misc.log2,
390 	    sctp_clog.x.misc.log3,
391 	    sctp_clog.x.misc.log4);
392 
393 }
394 
/*
 * Log a receive-window consumption event: the peer's rwnd, the size of
 * the send plus its per-chunk overhead.  new_rwnd is logged as 0 here;
 * sctp_log_rwnd_set() is the variant that records a new rwnd value.
 */
void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
412 
/*
 * Log a receive-window recalculation: the peer's old rwnd, the flight
 * size and overhead that went into the computation, and the resulting
 * new rwnd value ('a_rwndval').
 */
void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
430 
/*
 * Log an mbuf-count accounting change: total output queue size, the
 * byte-count ('book') change, total mbuf-count queue size and the
 * mbuf-count change being applied.
 */
void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
449 
/*
 * Log four caller-supplied 32-bit values as a generic "misc" trace
 * record; the meaning of a..d depends on the 'from' tag.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
458 
/*
 * Log a socket-wakeup event: wake count, flight/queue occupancy (stream
 * and out-queue counts clamped to 0xff), the endpoint's deferred-wakeup
 * flag bits and the low byte of the socket send-buffer flags (0xff when
 * there is no socket).  'stcb' must be non-NULL.  'cumtsn' is currently
 * unused.
 */
void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
505 
/*
 * Log a sender-blocking event: output-queue bytes, combined send+sent
 * queue count, peer rwnd, stream-queue and out-queue chunk counts,
 * flight size in KiB, and the length of the blocked send.  The 'so'
 * parameter is currently unused.
 */
void
sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
527 
/*
 * Stub for the SCTP_GET_STAT_LOG socket option: statistics are obtained
 * via the KTR trace facility instead, so this always succeeds without
 * touching 'optval'/'optsize'.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
534 
535 #ifdef SCTP_AUDITING_ENABLED
536 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
537 static int sctp_audit_indx = 0;
538 
539 static
540 void
541 sctp_print_audit_report(void)
542 {
543 	int i;
544 	int cnt;
545 
546 	cnt = 0;
547 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
548 		if ((sctp_audit_data[i][0] == 0xe0) &&
549 		    (sctp_audit_data[i][1] == 0x01)) {
550 			cnt = 0;
551 			SCTP_PRINTF("\n");
552 		} else if (sctp_audit_data[i][0] == 0xf0) {
553 			cnt = 0;
554 			SCTP_PRINTF("\n");
555 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
556 		    (sctp_audit_data[i][1] == 0x01)) {
557 			SCTP_PRINTF("\n");
558 			cnt = 0;
559 		}
560 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
561 		    (uint32_t) sctp_audit_data[i][1]);
562 		cnt++;
563 		if ((cnt % 14) == 0)
564 			SCTP_PRINTF("\n");
565 	}
566 	for (i = 0; i < sctp_audit_indx; i++) {
567 		if ((sctp_audit_data[i][0] == 0xe0) &&
568 		    (sctp_audit_data[i][1] == 0x01)) {
569 			cnt = 0;
570 			SCTP_PRINTF("\n");
571 		} else if (sctp_audit_data[i][0] == 0xf0) {
572 			cnt = 0;
573 			SCTP_PRINTF("\n");
574 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
575 		    (sctp_audit_data[i][1] == 0x01)) {
576 			SCTP_PRINTF("\n");
577 			cnt = 0;
578 		}
579 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
580 		    (uint32_t) sctp_audit_data[i][1]);
581 		cnt++;
582 		if ((cnt % 14) == 0)
583 			SCTP_PRINTF("\n");
584 	}
585 	SCTP_PRINTF("\n");
586 }
587 
588 void
589 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
590     struct sctp_nets *net)
591 {
592 	int resend_cnt, tot_out, rep, tot_book_cnt;
593 	struct sctp_nets *lnet;
594 	struct sctp_tmit_chunk *chk;
595 
596 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
597 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
598 	sctp_audit_indx++;
599 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
600 		sctp_audit_indx = 0;
601 	}
602 	if (inp == NULL) {
603 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
604 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
605 		sctp_audit_indx++;
606 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
607 			sctp_audit_indx = 0;
608 		}
609 		return;
610 	}
611 	if (stcb == NULL) {
612 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
613 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
614 		sctp_audit_indx++;
615 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
616 			sctp_audit_indx = 0;
617 		}
618 		return;
619 	}
620 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
621 	sctp_audit_data[sctp_audit_indx][1] =
622 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
623 	sctp_audit_indx++;
624 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
625 		sctp_audit_indx = 0;
626 	}
627 	rep = 0;
628 	tot_book_cnt = 0;
629 	resend_cnt = tot_out = 0;
630 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
631 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
632 			resend_cnt++;
633 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
634 			tot_out += chk->book_size;
635 			tot_book_cnt++;
636 		}
637 	}
638 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
639 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
640 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
641 		sctp_audit_indx++;
642 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
643 			sctp_audit_indx = 0;
644 		}
645 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
646 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
647 		rep = 1;
648 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
649 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
650 		sctp_audit_data[sctp_audit_indx][1] =
651 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
652 		sctp_audit_indx++;
653 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
654 			sctp_audit_indx = 0;
655 		}
656 	}
657 	if (tot_out != stcb->asoc.total_flight) {
658 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
659 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
660 		sctp_audit_indx++;
661 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
662 			sctp_audit_indx = 0;
663 		}
664 		rep = 1;
665 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
666 		    (int)stcb->asoc.total_flight);
667 		stcb->asoc.total_flight = tot_out;
668 	}
669 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
670 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
671 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
672 		sctp_audit_indx++;
673 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
674 			sctp_audit_indx = 0;
675 		}
676 		rep = 1;
677 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
678 
679 		stcb->asoc.total_flight_count = tot_book_cnt;
680 	}
681 	tot_out = 0;
682 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
683 		tot_out += lnet->flight_size;
684 	}
685 	if (tot_out != stcb->asoc.total_flight) {
686 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
687 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
688 		sctp_audit_indx++;
689 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
690 			sctp_audit_indx = 0;
691 		}
692 		rep = 1;
693 		SCTP_PRINTF("real flight:%d net total was %d\n",
694 		    stcb->asoc.total_flight, tot_out);
695 		/* now corrective action */
696 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
697 
698 			tot_out = 0;
699 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
700 				if ((chk->whoTo == lnet) &&
701 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
702 					tot_out += chk->book_size;
703 				}
704 			}
705 			if (lnet->flight_size != tot_out) {
706 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
707 				    (uint32_t) lnet, lnet->flight_size,
708 				    tot_out);
709 				lnet->flight_size = tot_out;
710 			}
711 		}
712 	}
713 	if (rep) {
714 		sctp_print_audit_report();
715 	}
716 }
717 
/*
 * Append an (event, detail) byte pair to the audit ring buffer,
 * wrapping the index back to 0 when SCTP_AUDIT_SIZE is reached.
 */
void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}
729 
730 #endif
731 
732 /*
733  * a list of sizes based on typical mtu's, used only if next hop size not
734  * returned.
735  */
/*
 * Table of common link MTU values, in strictly ascending order —
 * find_next_best_mtu() relies on that ordering to pick the next
 * smaller entry.  NUMBER_OF_MTU_SIZES must match the entry count.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
756 
757 void
758 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
759 {
760 	struct sctp_association *asoc;
761 	struct sctp_nets *net;
762 
763 	asoc = &stcb->asoc;
764 
765 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
766 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
767 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
768 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
769 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
770 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
771 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
772 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
773 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
774 	}
775 }
776 
777 int
778 find_next_best_mtu(int totsz)
779 {
780 	int i, perfer;
781 
782 	/*
783 	 * if we are in here we must find the next best fit based on the
784 	 * size of the dg that failed to be sent.
785 	 */
786 	perfer = 0;
787 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
788 		if (totsz < sctp_mtu_sizes[i]) {
789 			perfer = i - 1;
790 			if (perfer < 0)
791 				perfer = 0;
792 			break;
793 		}
794 	}
795 	return (sctp_mtu_sizes[perfer]);
796 }
797 
/*
 * Refill the endpoint's random-number store by HMAC-ing the endpoint's
 * seed material with a monotonically increasing counter, and reset the
 * consumption offset to 0.  Deliberately unlocked: concurrent refills
 * only add more entropy to the store (see comment below).
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
816 
/*
 * Return a 32-bit value drawn from the endpoint's random store, used as
 * an initial TSN (and by sctp_select_a_tag for vtags).  A slot is
 * claimed lock-free: the offset is advanced with atomic_cmpset and the
 * operation retried on contention.  When the offset wraps to 0 the
 * store is refilled.  If initial_sequence_debug is set, a simple
 * incrementing counter is returned instead (deterministic debug mode).
 *
 * NOTE(review): the value is read from the store *after* the cmpset and
 * any refill, so a concurrent refill can hand two callers overlapping
 * bytes — presumably acceptable per the "random is random" rationale in
 * sctp_fill_random_store(); confirm.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before the tail so a full 4-byte read always fits. */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
854 
855 uint32_t
856 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
857 {
858 	u_long x, not_done;
859 	struct timeval now;
860 
861 	(void)SCTP_GETTIME_TIMEVAL(&now);
862 	not_done = 1;
863 	while (not_done) {
864 		x = sctp_select_initial_TSN(&inp->sctp_ep);
865 		if (x == 0) {
866 			/* we never use 0 */
867 			continue;
868 		}
869 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
870 			not_done = 0;
871 		}
872 	}
873 	return (x);
874 }
875 
876 int
877 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
878     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
879 {
880 	struct sctp_association *asoc;
881 
882 	/*
883 	 * Anything set to zero is taken care of by the allocation routine's
884 	 * bzero
885 	 */
886 
887 	/*
888 	 * Up front select what scoping to apply on addresses I tell my peer
889 	 * Not sure what to do with these right now, we will need to come up
890 	 * with a way to set them. We may need to pass them through from the
891 	 * caller in the sctp_aloc_assoc() function.
892 	 */
893 	int i;
894 
895 	asoc = &stcb->asoc;
896 	/* init all variables to a known value. */
897 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
898 	asoc->max_burst = m->sctp_ep.max_burst;
899 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
900 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
901 	asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
902 	/* EY Init nr_sack variable */
903 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
904 	/* JRS 5/21/07 - Init CMT PF variables */
905 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
906 	asoc->sctp_frag_point = m->sctp_frag_point;
907 #ifdef INET
908 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
909 #else
910 	asoc->default_tos = 0;
911 #endif
912 
913 #ifdef INET6
914 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
915 #else
916 	asoc->default_flowlabel = 0;
917 #endif
918 	asoc->sb_send_resv = 0;
919 	if (override_tag) {
920 		asoc->my_vtag = override_tag;
921 	} else {
922 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
923 	}
924 	/* Get the nonce tags */
925 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
926 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
927 	asoc->vrf_id = vrf_id;
928 
929 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
930 		asoc->hb_is_disabled = 1;
931 	else
932 		asoc->hb_is_disabled = 0;
933 
934 #ifdef SCTP_ASOCLOG_OF_TSNS
935 	asoc->tsn_in_at = 0;
936 	asoc->tsn_out_at = 0;
937 	asoc->tsn_in_wrapped = 0;
938 	asoc->tsn_out_wrapped = 0;
939 	asoc->cumack_log_at = 0;
940 	asoc->cumack_log_atsnt = 0;
941 #endif
942 #ifdef SCTP_FS_SPEC_LOG
943 	asoc->fs_index = 0;
944 #endif
945 	asoc->refcnt = 0;
946 	asoc->assoc_up_sent = 0;
947 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
948 	    sctp_select_initial_TSN(&m->sctp_ep);
949 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
950 	/* we are optimisitic here */
951 	asoc->peer_supports_pktdrop = 1;
952 	asoc->peer_supports_nat = 0;
953 	asoc->sent_queue_retran_cnt = 0;
954 
955 	/* for CMT */
956 	asoc->last_net_cmt_send_started = NULL;
957 
958 	/* This will need to be adjusted */
959 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
960 	asoc->last_acked_seq = asoc->init_seq_number - 1;
961 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
962 	asoc->asconf_seq_in = asoc->last_acked_seq;
963 
964 	/* here we are different, we hold the next one we expect */
965 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
966 
967 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
968 	asoc->initial_rto = m->sctp_ep.initial_rto;
969 
970 	asoc->max_init_times = m->sctp_ep.max_init_times;
971 	asoc->max_send_times = m->sctp_ep.max_send_times;
972 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
973 	asoc->free_chunk_cnt = 0;
974 
975 	asoc->iam_blocking = 0;
976 	/* ECN Nonce initialization */
977 	asoc->context = m->sctp_context;
978 	asoc->def_send = m->def_send;
979 	asoc->ecn_nonce_allowed = 0;
980 	asoc->receiver_nonce_sum = 1;
981 	asoc->nonce_sum_expect_base = 1;
982 	asoc->nonce_sum_check = 1;
983 	asoc->nonce_resync_tsn = 0;
984 	asoc->nonce_wait_for_ecne = 0;
985 	asoc->nonce_wait_tsn = 0;
986 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
987 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
988 	asoc->pr_sctp_cnt = 0;
989 	asoc->total_output_queue_size = 0;
990 
991 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
992 		struct in6pcb *inp6;
993 
994 		/* Its a V6 socket */
995 		inp6 = (struct in6pcb *)m;
996 		asoc->ipv6_addr_legal = 1;
997 		/* Now look at the binding flag to see if V4 will be legal */
998 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
999 			asoc->ipv4_addr_legal = 1;
1000 		} else {
1001 			/* V4 addresses are NOT legal on the association */
1002 			asoc->ipv4_addr_legal = 0;
1003 		}
1004 	} else {
1005 		/* Its a V4 socket, no - V6 */
1006 		asoc->ipv4_addr_legal = 1;
1007 		asoc->ipv6_addr_legal = 0;
1008 	}
1009 
1010 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1011 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1012 
1013 	asoc->smallest_mtu = m->sctp_frag_point;
1014 #ifdef SCTP_PRINT_FOR_B_AND_M
1015 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1016 	    asoc->smallest_mtu);
1017 #endif
1018 	asoc->minrto = m->sctp_ep.sctp_minrto;
1019 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1020 
1021 	asoc->locked_on_sending = NULL;
1022 	asoc->stream_locked_on = 0;
1023 	asoc->ecn_echo_cnt_onq = 0;
1024 	asoc->stream_locked = 0;
1025 
1026 	asoc->send_sack = 1;
1027 
1028 	LIST_INIT(&asoc->sctp_restricted_addrs);
1029 
1030 	TAILQ_INIT(&asoc->nets);
1031 	TAILQ_INIT(&asoc->pending_reply_queue);
1032 	TAILQ_INIT(&asoc->asconf_ack_sent);
1033 	/* Setup to fill the hb random cache at first HB */
1034 	asoc->hb_random_idx = 4;
1035 
1036 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1037 
1038 	/*
1039 	 * JRS - Pick the default congestion control module based on the
1040 	 * sysctl.
1041 	 */
1042 	switch (m->sctp_ep.sctp_default_cc_module) {
1043 		/* JRS - Standard TCP congestion control */
1044 	case SCTP_CC_RFC2581:
1045 		{
1046 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1047 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1048 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1049 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1050 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1051 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1052 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1053 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1054 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1055 			break;
1056 		}
1057 		/* JRS - High Speed TCP congestion control (Floyd) */
1058 	case SCTP_CC_HSTCP:
1059 		{
1060 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1061 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1063 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1064 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1065 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1066 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1067 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1068 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1069 			break;
1070 		}
1071 		/* JRS - HTCP congestion control */
1072 	case SCTP_CC_HTCP:
1073 		{
1074 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1075 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1077 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1078 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1079 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1080 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1081 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1082 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1083 			break;
1084 		}
1085 		/* JRS - By default, use RFC2581 */
1086 	default:
1087 		{
1088 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1089 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1091 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1092 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1093 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1094 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1095 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1096 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1097 			break;
1098 		}
1099 	}
1100 
1101 	/*
1102 	 * Now the stream parameters, here we allocate space for all streams
1103 	 * that we request by default.
1104 	 */
1105 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1106 	    m->sctp_ep.pre_open_stream_count;
1107 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1108 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1109 	    SCTP_M_STRMO);
1110 	if (asoc->strmout == NULL) {
1111 		/* big trouble no memory */
1112 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1113 		return (ENOMEM);
1114 	}
1115 	for (i = 0; i < asoc->streamoutcnt; i++) {
1116 		/*
1117 		 * inbound side must be set to 0xffff, also NOTE when we get
1118 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1119 		 * count (streamoutcnt) but first check if we sent to any of
1120 		 * the upper streams that were dropped (if some were). Those
1121 		 * that were dropped must be notified to the upper layer as
1122 		 * failed to send.
1123 		 */
1124 		asoc->strmout[i].next_sequence_sent = 0x0;
1125 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1126 		asoc->strmout[i].stream_no = i;
1127 		asoc->strmout[i].last_msg_incomplete = 0;
1128 		asoc->strmout[i].next_spoke.tqe_next = 0;
1129 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1130 	}
1131 	/* Now the mapping array */
1132 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1133 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1134 	    SCTP_M_MAP);
1135 	if (asoc->mapping_array == NULL) {
1136 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1137 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1138 		return (ENOMEM);
1139 	}
1140 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1141 	/* EY  - initialize the nr_mapping_array just like mapping array */
1142 	asoc->nr_mapping_array_size = SCTP_INITIAL_NR_MAPPING_ARRAY;
1143 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->nr_mapping_array_size,
1144 	    SCTP_M_MAP);
1145 	if (asoc->nr_mapping_array == NULL) {
1146 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1147 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1148 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1149 		return (ENOMEM);
1150 	}
1151 	memset(asoc->nr_mapping_array, 0, asoc->nr_mapping_array_size);
1152 
1153 	/* Now the init of the other outqueues */
1154 	TAILQ_INIT(&asoc->free_chunks);
1155 	TAILQ_INIT(&asoc->out_wheel);
1156 	TAILQ_INIT(&asoc->control_send_queue);
1157 	TAILQ_INIT(&asoc->asconf_send_queue);
1158 	TAILQ_INIT(&asoc->send_queue);
1159 	TAILQ_INIT(&asoc->sent_queue);
1160 	TAILQ_INIT(&asoc->reasmqueue);
1161 	TAILQ_INIT(&asoc->resetHead);
1162 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1163 	TAILQ_INIT(&asoc->asconf_queue);
1164 	/* authentication fields */
1165 	asoc->authinfo.random = NULL;
1166 	asoc->authinfo.active_keyid = 0;
1167 	asoc->authinfo.assoc_key = NULL;
1168 	asoc->authinfo.assoc_keyid = 0;
1169 	asoc->authinfo.recv_key = NULL;
1170 	asoc->authinfo.recv_keyid = 0;
1171 	LIST_INIT(&asoc->shared_keys);
1172 	asoc->marked_retrans = 0;
1173 	asoc->timoinit = 0;
1174 	asoc->timodata = 0;
1175 	asoc->timosack = 0;
1176 	asoc->timoshutdown = 0;
1177 	asoc->timoheartbeat = 0;
1178 	asoc->timocookie = 0;
1179 	asoc->timoshutdownack = 0;
1180 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1181 	asoc->discontinuity_time = asoc->start_time;
1182 	/*
1183 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1184 	 * freed later whe the association is freed.
1185 	 */
1186 	return (0);
1187 }
1188 
1189 int
1190 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1191 {
1192 	/* mapping array needs to grow */
1193 	uint8_t *new_array;
1194 	uint32_t new_size;
1195 
1196 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1197 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1198 	if (new_array == NULL) {
1199 		/* can't get more, forget it */
1200 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1201 		    new_size);
1202 		return (-1);
1203 	}
1204 	memset(new_array, 0, new_size);
1205 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1206 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1207 	asoc->mapping_array = new_array;
1208 	asoc->mapping_array_size = new_size;
1209 	if (asoc->peer_supports_nr_sack) {
1210 		new_size = asoc->nr_mapping_array_size + ((needed + 7) / 8 + SCTP_NR_MAPPING_ARRAY_INCR);
1211 		SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1212 		if (new_array == NULL) {
1213 			/* can't get more, forget it */
1214 			SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1215 			    new_size);
1216 			return (-1);
1217 		}
1218 		memset(new_array, 0, new_size);
1219 		memcpy(new_array, asoc->nr_mapping_array, asoc->nr_mapping_array_size);
1220 		SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1221 		asoc->nr_mapping_array = new_array;
1222 		asoc->nr_mapping_array_size = new_size;
1223 	}
1224 	return (0);
1225 }
1226 
1227 
1228 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Run a single iterator to completion: visit every endpoint (inp) that
 * matches the iterator's pcb_flags/pcb_features filter and, within each,
 * every association whose state matches asoc_state, invoking the
 * registered callbacks.  The iterator structure itself is freed before
 * returning.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* drop the ref taken when the iterator was queued */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			/* final user callback before the iterator is destroyed */
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/* skip endpoints whose flags/features don't match the filter */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			/* single-endpoint mode: a mismatch means we are done */
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/*
	 * Downgrade to a read lock for the association walk.
	 * NOTE(review): the inp is briefly unlocked between the WUNLOCK
	 * and the RLOCK below and no extra ref is held at this point —
	 * confirm nothing can free the inp in that window.
	 */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* the per-endpoint callback asked us to skip, or no assocs */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* hold refs so the stcb/inp survive while we yield */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	/*
	 * NOTE(review): this write-lock/unlock pair looks like a barrier to
	 * let pending writers drain before stepping to the next inp —
	 * confirm the intent.
	 */
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1355 
1356 void
1357 sctp_iterator_worker(void)
1358 {
1359 	struct sctp_iterator *it = NULL;
1360 
1361 	/* This function is called with the WQ lock in place */
1362 
1363 	SCTP_BASE_INFO(iterator_running) = 1;
1364 again:
1365 	it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1366 	while (it) {
1367 		/* now lets work on this one */
1368 		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
1369 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1370 		sctp_iterator_work(it);
1371 		SCTP_IPI_ITERATOR_WQ_LOCK();
1372 		/* sa_ignore FREED_MEMORY */
1373 		it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1374 	}
1375 	if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
1376 		goto again;
1377 	}
1378 	SCTP_BASE_INFO(iterator_running) = 0;
1379 	return;
1380 }
1381 
1382 #endif
1383 
1384 
1385 static void
1386 sctp_handle_addr_wq(void)
1387 {
1388 	/* deal with the ADDR wq from the rtsock calls */
1389 	struct sctp_laddr *wi;
1390 	struct sctp_asconf_iterator *asc;
1391 
1392 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1393 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1394 	if (asc == NULL) {
1395 		/* Try later, no memory */
1396 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1397 		    (struct sctp_inpcb *)NULL,
1398 		    (struct sctp_tcb *)NULL,
1399 		    (struct sctp_nets *)NULL);
1400 		return;
1401 	}
1402 	LIST_INIT(&asc->list_of_work);
1403 	asc->cnt = 0;
1404 	SCTP_IPI_ITERATOR_WQ_LOCK();
1405 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1406 	while (wi != NULL) {
1407 		LIST_REMOVE(wi, sctp_nxt_addr);
1408 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1409 		asc->cnt++;
1410 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1411 	}
1412 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1413 	if (asc->cnt == 0) {
1414 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1415 	} else {
1416 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1417 		    sctp_asconf_iterator_stcb,
1418 		    NULL,	/* No ep end for boundall */
1419 		    SCTP_PCB_FLAGS_BOUNDALL,
1420 		    SCTP_PCB_ANY_FEATURES,
1421 		    SCTP_ASOC_ANY_STATE,
1422 		    (void *)asc, 0,
1423 		    sctp_asconf_iterator_end, NULL, 0);
1424 	}
1425 }
1426 
/*
 * Scratch results from the T3-rxt path of sctp_timeout_handler().
 * NOTE(review): these are unlocked, non-static file-scope globals written
 * from timer context; timers for different associations can run
 * concurrently on SMP and race on them.  They look like debug aids and
 * should probably be function locals — confirm nothing else reads them.
 */
int retcode = 0;
int cur_oerr = 0;
1429 
1430 void
1431 sctp_timeout_handler(void *t)
1432 {
1433 	struct sctp_inpcb *inp;
1434 	struct sctp_tcb *stcb;
1435 	struct sctp_nets *net;
1436 	struct sctp_timer *tmr;
1437 
1438 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1439 	struct socket *so;
1440 
1441 #endif
1442 	int did_output, type;
1443 	struct sctp_iterator *it = NULL;
1444 
1445 	tmr = (struct sctp_timer *)t;
1446 	inp = (struct sctp_inpcb *)tmr->ep;
1447 	stcb = (struct sctp_tcb *)tmr->tcb;
1448 	net = (struct sctp_nets *)tmr->net;
1449 	did_output = 1;
1450 
1451 #ifdef SCTP_AUDITING_ENABLED
1452 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1453 	sctp_auditing(3, inp, stcb, net);
1454 #endif
1455 
1456 	/* sanity checks... */
1457 	if (tmr->self != (void *)tmr) {
1458 		/*
1459 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1460 		 * tmr);
1461 		 */
1462 		return;
1463 	}
1464 	tmr->stopped_from = 0xa001;
1465 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1466 		/*
1467 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1468 		 * tmr->type);
1469 		 */
1470 		return;
1471 	}
1472 	tmr->stopped_from = 0xa002;
1473 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1474 		return;
1475 	}
1476 	/* if this is an iterator timeout, get the struct and clear inp */
1477 	tmr->stopped_from = 0xa003;
1478 	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
1479 		it = (struct sctp_iterator *)inp;
1480 		inp = NULL;
1481 	}
1482 	type = tmr->type;
1483 	if (inp) {
1484 		SCTP_INP_INCR_REF(inp);
1485 		if ((inp->sctp_socket == 0) &&
1486 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1487 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1488 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1489 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1490 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1491 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1492 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1493 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1494 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1495 		    ) {
1496 			SCTP_INP_DECR_REF(inp);
1497 			return;
1498 		}
1499 	}
1500 	tmr->stopped_from = 0xa004;
1501 	if (stcb) {
1502 		atomic_add_int(&stcb->asoc.refcnt, 1);
1503 		if (stcb->asoc.state == 0) {
1504 			atomic_add_int(&stcb->asoc.refcnt, -1);
1505 			if (inp) {
1506 				SCTP_INP_DECR_REF(inp);
1507 			}
1508 			return;
1509 		}
1510 	}
1511 	tmr->stopped_from = 0xa005;
1512 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
1513 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1514 		if (inp) {
1515 			SCTP_INP_DECR_REF(inp);
1516 		}
1517 		if (stcb) {
1518 			atomic_add_int(&stcb->asoc.refcnt, -1);
1519 		}
1520 		return;
1521 	}
1522 	tmr->stopped_from = 0xa006;
1523 
1524 	if (stcb) {
1525 		SCTP_TCB_LOCK(stcb);
1526 		atomic_add_int(&stcb->asoc.refcnt, -1);
1527 		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
1528 		    ((stcb->asoc.state == 0) ||
1529 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1530 			SCTP_TCB_UNLOCK(stcb);
1531 			if (inp) {
1532 				SCTP_INP_DECR_REF(inp);
1533 			}
1534 			return;
1535 		}
1536 	}
1537 	/* record in stopped what t-o occured */
1538 	tmr->stopped_from = tmr->type;
1539 
1540 	/* mark as being serviced now */
1541 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1542 		/*
1543 		 * Callout has been rescheduled.
1544 		 */
1545 		goto get_out;
1546 	}
1547 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1548 		/*
1549 		 * Not active, so no action.
1550 		 */
1551 		goto get_out;
1552 	}
1553 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1554 
1555 	/* call the handler for the appropriate timer type */
1556 	switch (tmr->type) {
1557 	case SCTP_TIMER_TYPE_ZERO_COPY:
1558 		if (inp == NULL) {
1559 			break;
1560 		}
1561 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1562 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1563 		}
1564 		break;
1565 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1566 		if (inp == NULL) {
1567 			break;
1568 		}
1569 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1570 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1571 		}
1572 		break;
1573 	case SCTP_TIMER_TYPE_ADDR_WQ:
1574 		sctp_handle_addr_wq();
1575 		break;
1576 	case SCTP_TIMER_TYPE_ITERATOR:
1577 		SCTP_STAT_INCR(sctps_timoiterator);
1578 		sctp_iterator_timer(it);
1579 		break;
1580 	case SCTP_TIMER_TYPE_SEND:
1581 		if ((stcb == NULL) || (inp == NULL)) {
1582 			break;
1583 		}
1584 		SCTP_STAT_INCR(sctps_timodata);
1585 		stcb->asoc.timodata++;
1586 		stcb->asoc.num_send_timers_up--;
1587 		if (stcb->asoc.num_send_timers_up < 0) {
1588 			stcb->asoc.num_send_timers_up = 0;
1589 		}
1590 		SCTP_TCB_LOCK_ASSERT(stcb);
1591 		cur_oerr = stcb->asoc.overall_error_count;
1592 		retcode = sctp_t3rxt_timer(inp, stcb, net);
1593 		if (retcode) {
1594 			/* no need to unlock on tcb its gone */
1595 
1596 			goto out_decr;
1597 		}
1598 		SCTP_TCB_LOCK_ASSERT(stcb);
1599 #ifdef SCTP_AUDITING_ENABLED
1600 		sctp_auditing(4, inp, stcb, net);
1601 #endif
1602 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1603 		if ((stcb->asoc.num_send_timers_up == 0) &&
1604 		    (stcb->asoc.sent_queue_cnt > 0)
1605 		    ) {
1606 			struct sctp_tmit_chunk *chk;
1607 
1608 			/*
1609 			 * safeguard. If there on some on the sent queue
1610 			 * somewhere but no timers running something is
1611 			 * wrong... so we start a timer on the first chunk
1612 			 * on the send queue on whatever net it is sent to.
1613 			 */
1614 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1615 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1616 			    chk->whoTo);
1617 		}
1618 		break;
1619 	case SCTP_TIMER_TYPE_INIT:
1620 		if ((stcb == NULL) || (inp == NULL)) {
1621 			break;
1622 		}
1623 		SCTP_STAT_INCR(sctps_timoinit);
1624 		stcb->asoc.timoinit++;
1625 		if (sctp_t1init_timer(inp, stcb, net)) {
1626 			/* no need to unlock on tcb its gone */
1627 			goto out_decr;
1628 		}
1629 		/* We do output but not here */
1630 		did_output = 0;
1631 		break;
1632 	case SCTP_TIMER_TYPE_RECV:
1633 		if ((stcb == NULL) || (inp == NULL)) {
1634 			break;
1635 		} {
1636 			int abort_flag;
1637 
1638 			SCTP_STAT_INCR(sctps_timosack);
1639 			stcb->asoc.timosack++;
1640 			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
1641 				sctp_sack_check(stcb, 0, 0, &abort_flag);
1642 
1643 			/*
1644 			 * EY if nr_sacks used then send an nr-sack , a sack
1645 			 * otherwise
1646 			 */
1647 			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
1648 				sctp_send_nr_sack(stcb);
1649 			else
1650 				sctp_send_sack(stcb);
1651 		}
1652 #ifdef SCTP_AUDITING_ENABLED
1653 		sctp_auditing(4, inp, stcb, net);
1654 #endif
1655 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1656 		break;
1657 	case SCTP_TIMER_TYPE_SHUTDOWN:
1658 		if ((stcb == NULL) || (inp == NULL)) {
1659 			break;
1660 		}
1661 		if (sctp_shutdown_timer(inp, stcb, net)) {
1662 			/* no need to unlock on tcb its gone */
1663 			goto out_decr;
1664 		}
1665 		SCTP_STAT_INCR(sctps_timoshutdown);
1666 		stcb->asoc.timoshutdown++;
1667 #ifdef SCTP_AUDITING_ENABLED
1668 		sctp_auditing(4, inp, stcb, net);
1669 #endif
1670 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1671 		break;
1672 	case SCTP_TIMER_TYPE_HEARTBEAT:
1673 		{
1674 			struct sctp_nets *lnet;
1675 			int cnt_of_unconf = 0;
1676 
1677 			if ((stcb == NULL) || (inp == NULL)) {
1678 				break;
1679 			}
1680 			SCTP_STAT_INCR(sctps_timoheartbeat);
1681 			stcb->asoc.timoheartbeat++;
1682 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1683 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1684 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1685 					cnt_of_unconf++;
1686 				}
1687 			}
1688 			if (cnt_of_unconf == 0) {
1689 				if (sctp_heartbeat_timer(inp, stcb, lnet,
1690 				    cnt_of_unconf)) {
1691 					/* no need to unlock on tcb its gone */
1692 					goto out_decr;
1693 				}
1694 			}
1695 #ifdef SCTP_AUDITING_ENABLED
1696 			sctp_auditing(4, inp, stcb, lnet);
1697 #endif
1698 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
1699 			    stcb->sctp_ep, stcb, lnet);
1700 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1701 		}
1702 		break;
1703 	case SCTP_TIMER_TYPE_COOKIE:
1704 		if ((stcb == NULL) || (inp == NULL)) {
1705 			break;
1706 		}
1707 		if (sctp_cookie_timer(inp, stcb, net)) {
1708 			/* no need to unlock on tcb its gone */
1709 			goto out_decr;
1710 		}
1711 		SCTP_STAT_INCR(sctps_timocookie);
1712 		stcb->asoc.timocookie++;
1713 #ifdef SCTP_AUDITING_ENABLED
1714 		sctp_auditing(4, inp, stcb, net);
1715 #endif
1716 		/*
1717 		 * We consider T3 and Cookie timer pretty much the same with
1718 		 * respect to where from in chunk_output.
1719 		 */
1720 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1721 		break;
1722 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1723 		{
1724 			struct timeval tv;
1725 			int i, secret;
1726 
1727 			if (inp == NULL) {
1728 				break;
1729 			}
1730 			SCTP_STAT_INCR(sctps_timosecret);
1731 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1732 			SCTP_INP_WLOCK(inp);
1733 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1734 			inp->sctp_ep.last_secret_number =
1735 			    inp->sctp_ep.current_secret_number;
1736 			inp->sctp_ep.current_secret_number++;
1737 			if (inp->sctp_ep.current_secret_number >=
1738 			    SCTP_HOW_MANY_SECRETS) {
1739 				inp->sctp_ep.current_secret_number = 0;
1740 			}
1741 			secret = (int)inp->sctp_ep.current_secret_number;
1742 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1743 				inp->sctp_ep.secret_key[secret][i] =
1744 				    sctp_select_initial_TSN(&inp->sctp_ep);
1745 			}
1746 			SCTP_INP_WUNLOCK(inp);
1747 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1748 		}
1749 		did_output = 0;
1750 		break;
1751 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1752 		if ((stcb == NULL) || (inp == NULL)) {
1753 			break;
1754 		}
1755 		SCTP_STAT_INCR(sctps_timopathmtu);
1756 		sctp_pathmtu_timer(inp, stcb, net);
1757 		did_output = 0;
1758 		break;
1759 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1760 		if ((stcb == NULL) || (inp == NULL)) {
1761 			break;
1762 		}
1763 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1764 			/* no need to unlock on tcb its gone */
1765 			goto out_decr;
1766 		}
1767 		SCTP_STAT_INCR(sctps_timoshutdownack);
1768 		stcb->asoc.timoshutdownack++;
1769 #ifdef SCTP_AUDITING_ENABLED
1770 		sctp_auditing(4, inp, stcb, net);
1771 #endif
1772 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1773 		break;
1774 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1775 		if ((stcb == NULL) || (inp == NULL)) {
1776 			break;
1777 		}
1778 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1779 		sctp_abort_an_association(inp, stcb,
1780 		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
1781 		/* no need to unlock on tcb its gone */
1782 		goto out_decr;
1783 
1784 	case SCTP_TIMER_TYPE_STRRESET:
1785 		if ((stcb == NULL) || (inp == NULL)) {
1786 			break;
1787 		}
1788 		if (sctp_strreset_timer(inp, stcb, net)) {
1789 			/* no need to unlock on tcb its gone */
1790 			goto out_decr;
1791 		}
1792 		SCTP_STAT_INCR(sctps_timostrmrst);
1793 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1794 		break;
1795 	case SCTP_TIMER_TYPE_EARLYFR:
1796 		/* Need to do FR of things for net */
1797 		if ((stcb == NULL) || (inp == NULL)) {
1798 			break;
1799 		}
1800 		SCTP_STAT_INCR(sctps_timoearlyfr);
1801 		sctp_early_fr_timer(inp, stcb, net);
1802 		break;
1803 	case SCTP_TIMER_TYPE_ASCONF:
1804 		if ((stcb == NULL) || (inp == NULL)) {
1805 			break;
1806 		}
1807 		if (sctp_asconf_timer(inp, stcb, net)) {
1808 			/* no need to unlock on tcb its gone */
1809 			goto out_decr;
1810 		}
1811 		SCTP_STAT_INCR(sctps_timoasconf);
1812 #ifdef SCTP_AUDITING_ENABLED
1813 		sctp_auditing(4, inp, stcb, net);
1814 #endif
1815 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1816 		break;
1817 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1818 		if ((stcb == NULL) || (inp == NULL)) {
1819 			break;
1820 		}
1821 		sctp_delete_prim_timer(inp, stcb, net);
1822 		SCTP_STAT_INCR(sctps_timodelprim);
1823 		break;
1824 
1825 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1826 		if ((stcb == NULL) || (inp == NULL)) {
1827 			break;
1828 		}
1829 		SCTP_STAT_INCR(sctps_timoautoclose);
1830 		sctp_autoclose_timer(inp, stcb, net);
1831 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1832 		did_output = 0;
1833 		break;
1834 	case SCTP_TIMER_TYPE_ASOCKILL:
1835 		if ((stcb == NULL) || (inp == NULL)) {
1836 			break;
1837 		}
1838 		SCTP_STAT_INCR(sctps_timoassockill);
1839 		/* Can we free it yet? */
1840 		SCTP_INP_DECR_REF(inp);
1841 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1842 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1843 		so = SCTP_INP_SO(inp);
1844 		atomic_add_int(&stcb->asoc.refcnt, 1);
1845 		SCTP_TCB_UNLOCK(stcb);
1846 		SCTP_SOCKET_LOCK(so, 1);
1847 		SCTP_TCB_LOCK(stcb);
1848 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1849 #endif
1850 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1851 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1852 		SCTP_SOCKET_UNLOCK(so, 1);
1853 #endif
1854 		/*
1855 		 * free asoc, always unlocks (or destroy's) so prevent
1856 		 * duplicate unlock or unlock of a free mtx :-0
1857 		 */
1858 		stcb = NULL;
1859 		goto out_no_decr;
1860 	case SCTP_TIMER_TYPE_INPKILL:
1861 		SCTP_STAT_INCR(sctps_timoinpkill);
1862 		if (inp == NULL) {
1863 			break;
1864 		}
1865 		/*
1866 		 * special case, take away our increment since WE are the
1867 		 * killer
1868 		 */
1869 		SCTP_INP_DECR_REF(inp);
1870 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1871 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1872 		    SCTP_CALLED_DIRECTLY_NOCMPSET);
1873 		inp = NULL;
1874 		goto out_no_decr;
1875 	default:
1876 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1877 		    tmr->type);
1878 		break;
1879 	};
1880 #ifdef SCTP_AUDITING_ENABLED
1881 	sctp_audit_log(0xF1, (uint8_t) tmr->type);
1882 	if (inp)
1883 		sctp_auditing(5, inp, stcb, net);
1884 #endif
1885 	if ((did_output) && stcb) {
1886 		/*
1887 		 * Now we need to clean up the control chunk chain if an
1888 		 * ECNE is on it. It must be marked as UNSENT again so next
1889 		 * call will continue to send it until such time that we get
1890 		 * a CWR, to remove it. It is, however, less likely that we
1891 		 * will find a ecn echo on the chain though.
1892 		 */
1893 		sctp_fix_ecn_echo(&stcb->asoc);
1894 	}
1895 get_out:
1896 	if (stcb) {
1897 		SCTP_TCB_UNLOCK(stcb);
1898 	}
1899 out_decr:
1900 	if (inp) {
1901 		SCTP_INP_DECR_REF(inp);
1902 	}
1903 out_no_decr:
1904 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
1905 	    type);
1906 }
1907 
/*
 * Arm the timer of kind 't_type' for the given endpoint (inp),
 * association (stcb) and/or destination (net).  The switch selects
 * which sctp_timer instance backs the type and computes the timeout
 * in ticks; RTO-based timers fall back to the association's
 * initial_rto until the path has a measured RTO (net->RTO == 0).
 * Silently returns if a required argument for the type is missing,
 * if the computed timeout/timer is unusable, or if the chosen timer
 * is already pending (the running instance is left untouched).
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int to_ticks;
	struct sctp_timer *tmr;

	/* Every type except ADDR_WQ needs an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	to_ticks = 0;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			/* 'inp' actually carries the iterator here. */
			struct sctp_iterator *it;

			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
			to_ticks = SCTP_ITERATOR_TICKS;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp
		 * ususually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint8_t this_random;
			int cnt_of_unconf = 0;
			struct sctp_nets *lnet;

			/* Count reachable-but-unconfirmed destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf) {
				/*
				 * NOTE(review): net is deliberately cleared
				 * here so the non-net timeout branch below
				 * is taken; sctp_heartbeat_timer is invoked
				 * with a NULL lnet — confirm it handles the
				 * unconfirmed-address probing itself.
				 */
				net = lnet = NULL;
				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
			}
			if (stcb->asoc.hb_random_idx > 3) {
				/* Refill the 4-byte pool of random jitter values. */
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				memcpy(stcb->asoc.hb_random_values, &rndval,
				    sizeof(stcb->asoc.hb_random_values));
				stcb->asoc.hb_random_idx = 0;
			}
			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
			stcb->asoc.hb_random_idx++;
			stcb->asoc.hb_ect_randombit = 0;
			/*
			 * this_random will be 0 - 256 ms RTO is in ms.
			 */
			if ((stcb->asoc.hb_is_disabled) &&
			    (cnt_of_unconf == 0)) {
				return;
			}
			if (net) {
				int delay;

				delay = stcb->asoc.heart_beat_delay;
				/* No extra delay if any address still needs confirming. */
				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
						delay = 0;
					}
				}
				if (net->RTO == 0) {
					/* Never been checked */
					to_ticks = this_random + stcb->asoc.initial_rto + delay;
				} else {
					/* set rto_val to the ms */
					to_ticks = delay + net->RTO + this_random;
				}
			} else {
				if (cnt_of_unconf) {
					to_ticks = this_random + stcb->asoc.initial_rto;
				} else {
					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
				}
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &stcb->asoc.hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was compelete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here ususually about 60
		 * minutes.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Re-uses the stream-reset timer slot for the kill delay. */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU ususually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (inp == NULL)) {
			return;
		}
		if (net == NULL) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_EARLYFR:
		{
			unsigned int msec;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			if (net->flight_size > net->cwnd) {
				/* no need to start */
				return;
			}
			SCTP_STAT_INCR(sctps_earlyfrstart);
			if (net->lastsa == 0) {
				/* Hmm no rtt estimate yet? */
				msec = stcb->asoc.initial_rto >> 2;
			} else {
				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
			}
			/* Clamp to the sysctl minimum (and its hard floor). */
			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
				if (msec < SCTP_MINFR_MSEC_FLOOR) {
					msec = SCTP_MINFR_MSEC_FLOOR;
				}
			}
			to_ticks = MSEC_TO_TICKS(msec);
			tmr = &net->fr_timer;
		}
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		/* Note: this type requires net to be NULL. */
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		return;
		break;
	};
	/* A type that fell through without a timer and timeout is a bug. */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __FUNCTION__, t_type, to_ticks, tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	/* Record context so the timeout handler can locate its objects. */
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2270 
/*
 * Stop (disarm) the timer of kind 't_type' for the given endpoint,
 * association and/or destination.  'from' identifies the caller's
 * location (SCTP_FROM_* | SCTP_LOC_*) and is recorded in the timer
 * for debugging.  Because some sctp_timer slots are shared between
 * timer types (e.g. strreset/asockill, signature_change/inpkill,
 * rxt_timer for SEND/INIT/COOKIE/...), the stop is skipped when the
 * slot is currently running as a different type.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Every type except ADDR_WQ needs an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			/* 'inp' actually carries the iterator here. */
			struct sctp_iterator *it;

			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the count of outstanding SEND timers consistent. */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/*
	 * NOTE(review): clearing 'self' and recording 'stopped_from'
	 * appears to let a concurrently-firing timeout handler detect
	 * the stop — confirm against sctp_timeout_handler.
	 */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2442 
2443 uint32_t
2444 sctp_calculate_len(struct mbuf *m)
2445 {
2446 	uint32_t tlen = 0;
2447 	struct mbuf *at;
2448 
2449 	at = m;
2450 	while (at) {
2451 		tlen += SCTP_BUF_LEN(at);
2452 		at = SCTP_BUF_NEXT(at);
2453 	}
2454 	return (tlen);
2455 }
2456 
2457 void
2458 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2459     struct sctp_association *asoc, uint32_t mtu)
2460 {
2461 	/*
2462 	 * Reset the P-MTU size on this association, this involves changing
2463 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2464 	 * allow the DF flag to be cleared.
2465 	 */
2466 	struct sctp_tmit_chunk *chk;
2467 	unsigned int eff_mtu, ovh;
2468 
2469 #ifdef SCTP_PRINT_FOR_B_AND_M
2470 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2471 	    inp, asoc, mtu);
2472 #endif
2473 	asoc->smallest_mtu = mtu;
2474 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2475 		ovh = SCTP_MIN_OVERHEAD;
2476 	} else {
2477 		ovh = SCTP_MIN_V4_OVERHEAD;
2478 	}
2479 	eff_mtu = mtu - ovh;
2480 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2481 
2482 		if (chk->send_size > eff_mtu) {
2483 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2484 		}
2485 	}
2486 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2487 		if (chk->send_size > eff_mtu) {
2488 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2489 		}
2490 	}
2491 }
2492 
2493 
2494 /*
2495  * given an association and starting time of the current RTT period return
2496  * RTO in number of msecs net should point to the current network
2497  */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/*
			 * impossible .. garbage in nothing out
			 * (defensive: >, < and == are all handled above,
			 * so this branch cannot be reached)
			 */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/* Record the raw measurement before it is consumed below. */
	net->rtt = o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR (integer form). */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	/* Latch satellite-network mode once a large RTO is observed. */
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2623 
2624 /*
2625  * return a pointer to a contiguous piece of data from the given mbuf chain
2626  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2627  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2628  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2629  */
2630 caddr_t
2631 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2632 {
2633 	uint32_t count;
2634 	uint8_t *ptr;
2635 
2636 	ptr = in_ptr;
2637 	if ((off < 0) || (len <= 0))
2638 		return (NULL);
2639 
2640 	/* find the desired start location */
2641 	while ((m != NULL) && (off > 0)) {
2642 		if (off < SCTP_BUF_LEN(m))
2643 			break;
2644 		off -= SCTP_BUF_LEN(m);
2645 		m = SCTP_BUF_NEXT(m);
2646 	}
2647 	if (m == NULL)
2648 		return (NULL);
2649 
2650 	/* is the current mbuf large enough (eg. contiguous)? */
2651 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2652 		return (mtod(m, caddr_t)+off);
2653 	} else {
2654 		/* else, it spans more than one mbuf, so save a temp copy... */
2655 		while ((m != NULL) && (len > 0)) {
2656 			count = min(SCTP_BUF_LEN(m) - off, len);
2657 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2658 			len -= count;
2659 			ptr += count;
2660 			off = 0;
2661 			m = SCTP_BUF_NEXT(m);
2662 		}
2663 		if ((m == NULL) && (len > 0))
2664 			return (NULL);
2665 		else
2666 			return ((caddr_t)in_ptr);
2667 	}
2668 }
2669 
2670 
2671 
2672 struct sctp_paramhdr *
2673 sctp_get_next_param(struct mbuf *m,
2674     int offset,
2675     struct sctp_paramhdr *pull,
2676     int pull_limit)
2677 {
2678 	/* This just provides a typed signature to Peter's Pull routine */
2679 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2680 	    (uint8_t *) pull));
2681 }
2682 
2683 
2684 int
2685 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2686 {
2687 	/*
2688 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2689 	 * padlen is > 3 this routine will fail.
2690 	 */
2691 	uint8_t *dp;
2692 	int i;
2693 
2694 	if (padlen > 3) {
2695 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2696 		return (ENOBUFS);
2697 	}
2698 	if (padlen <= M_TRAILINGSPACE(m)) {
2699 		/*
2700 		 * The easy way. We hope the majority of the time we hit
2701 		 * here :)
2702 		 */
2703 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2704 		SCTP_BUF_LEN(m) += padlen;
2705 	} else {
2706 		/* Hard way we must grow the mbuf */
2707 		struct mbuf *tmp;
2708 
2709 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2710 		if (tmp == NULL) {
2711 			/* Out of space GAK! we are in big trouble. */
2712 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2713 			return (ENOSPC);
2714 		}
2715 		/* setup and insert in middle */
2716 		SCTP_BUF_LEN(tmp) = padlen;
2717 		SCTP_BUF_NEXT(tmp) = NULL;
2718 		SCTP_BUF_NEXT(m) = tmp;
2719 		dp = mtod(tmp, uint8_t *);
2720 	}
2721 	/* zero out the pad */
2722 	for (i = 0; i < padlen; i++) {
2723 		*dp = 0;
2724 		dp++;
2725 	}
2726 	return (0);
2727 }
2728 
2729 int
2730 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2731 {
2732 	/* find the last mbuf in chain and pad it */
2733 	struct mbuf *m_at;
2734 
2735 	m_at = m;
2736 	if (last_mbuf) {
2737 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2738 	} else {
2739 		while (m_at) {
2740 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2741 				return (sctp_add_pad_tombuf(m_at, padval));
2742 			}
2743 			m_at = SCTP_BUF_NEXT(m_at);
2744 		}
2745 	}
2746 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2747 	return (EFAULT);
2748 }
2749 
/* Counter: bumped each time an assoc-change event wakes socket sleepers. */
int sctp_asoc_change_wake = 0;
2751 
/*
 * Build and queue an SCTP_ASSOC_CHANGE notification for 'event'
 * (e.g. SCTP_COMM_LOST) onto the socket's receive queue.  For
 * TCP-model / connected-UDP sockets, a COMM_LOST or CANT_STR_ASSOC
 * event also sets so_error (ECONNREFUSED while still in COOKIE_WAIT,
 * ECONNRESET otherwise) and wakes any read/write sleepers.
 * 'so_locked' tells the lock-testing builds whether the socket lock
 * is already held; 'data' is currently unused by this function.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Drop the tcb lock to take the socket lock in
			 * the right order; the refcnt keeps the tcb alive
			 * across the gap.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Fill in the sctp_assoc_change notification payload. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Same lock-order dance as above. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2869 
/*
 * Build and queue an SCTP_PEER_ADDR_CHANGE notification carrying the
 * peer address 'sa', the new 'state' (e.g. reachable/unreachable) and
 * 'error'.  No-op if the application has not enabled RECVPADDREVNT.
 * For link-local IPv6 addresses the scope id is normalized for user
 * consumption before delivery.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	switch (sa->sa_family) {
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
2944 
2945 
/*
 * Build and queue an SCTP_SEND_FAILED notification for chunk 'chk'.
 * The original user data (minus the SCTP data-chunk header) is
 * stolen from the chunk and chained behind the notification header
 * so the application can recover it.  Dropped silently if the
 * RECVSENDFAILEVNT feature is off or the socket buffer is too full.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* Reported length = header + user data (chunk header excluded). */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3027 
3028 
/*
 * Queue an SCTP_SEND_FAILED notification for a message still sitting on
 * a stream output queue (never chunked for the wire).  The data mbufs
 * are stolen from 'sp' (sp->data is set to NULL) and chained behind the
 * notification header.  'error' selects SCTP_DATA_UNSENT vs
 * SCTP_DATA_SENT; 'so_locked' is passed through to sctp_add_to_readq().
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* no chunk header here, so the full queued length is reported */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	/*
	 * If part of the message was already taken off for transmission,
	 * what remains is the tail fragment; otherwise it is unfragmented.
	 */
	if (sp->some_taken) {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3101 
3102 
3103 
3104 static void
3105 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3106     uint32_t error)
3107 {
3108 	struct mbuf *m_notify;
3109 	struct sctp_adaptation_event *sai;
3110 	struct sctp_queued_to_read *control;
3111 
3112 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3113 		/* event not enabled */
3114 		return;
3115 	}
3116 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3117 	if (m_notify == NULL)
3118 		/* no space left */
3119 		return;
3120 	SCTP_BUF_LEN(m_notify) = 0;
3121 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3122 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3123 	sai->sai_flags = 0;
3124 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3125 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3126 	sai->sai_assoc_id = sctp_get_associd(stcb);
3127 
3128 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3129 	SCTP_BUF_NEXT(m_notify) = NULL;
3130 
3131 	/* append to socket */
3132 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3133 	    0, 0, 0, 0, 0, 0,
3134 	    m_notify);
3135 	if (control == NULL) {
3136 		/* no memory */
3137 		sctp_m_freem(m_notify);
3138 		return;
3139 	}
3140 	control->length = SCTP_BUF_LEN(m_notify);
3141 	control->spec_flags = M_NOTIFICATION;
3142 	/* not that we need this */
3143 	control->tail_mbuf = m_notify;
3144 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3145 	    control,
3146 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3147 }
3148 
3149 /* This always must be called with the read-queue LOCKED in the INP */
3150 static void
3151 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3152     uint32_t val, int so_locked
3153 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3154     SCTP_UNUSED
3155 #endif
3156 )
3157 {
3158 	struct mbuf *m_notify;
3159 	struct sctp_pdapi_event *pdapi;
3160 	struct sctp_queued_to_read *control;
3161 	struct sockbuf *sb;
3162 
3163 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3164 		/* event not enabled */
3165 		return;
3166 	}
3167 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3168 	if (m_notify == NULL)
3169 		/* no space left */
3170 		return;
3171 	SCTP_BUF_LEN(m_notify) = 0;
3172 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3173 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3174 	pdapi->pdapi_flags = 0;
3175 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3176 	pdapi->pdapi_indication = error;
3177 	pdapi->pdapi_stream = (val >> 16);
3178 	pdapi->pdapi_seq = (val & 0x0000ffff);
3179 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3180 
3181 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3182 	SCTP_BUF_NEXT(m_notify) = NULL;
3183 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3184 	    0, 0, 0, 0, 0, 0,
3185 	    m_notify);
3186 	if (control == NULL) {
3187 		/* no memory */
3188 		sctp_m_freem(m_notify);
3189 		return;
3190 	}
3191 	control->spec_flags = M_NOTIFICATION;
3192 	control->length = SCTP_BUF_LEN(m_notify);
3193 	/* not that we need this */
3194 	control->tail_mbuf = m_notify;
3195 	control->held_length = 0;
3196 	control->length = 0;
3197 	sb = &stcb->sctp_socket->so_rcv;
3198 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3199 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3200 	}
3201 	sctp_sballoc(stcb, sb, m_notify);
3202 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3203 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3204 	}
3205 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3206 	control->end_added = 1;
3207 	if (stcb->asoc.control_pdapi)
3208 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3209 	else {
3210 		/* we really should not see this case */
3211 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3212 	}
3213 	if (stcb->sctp_ep && stcb->sctp_socket) {
3214 		/* This should always be the case */
3215 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3216 		struct socket *so;
3217 
3218 		so = SCTP_INP_SO(stcb->sctp_ep);
3219 		if (!so_locked) {
3220 			atomic_add_int(&stcb->asoc.refcnt, 1);
3221 			SCTP_TCB_UNLOCK(stcb);
3222 			SCTP_SOCKET_LOCK(so, 1);
3223 			SCTP_TCB_LOCK(stcb);
3224 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3225 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3226 				SCTP_SOCKET_UNLOCK(so, 1);
3227 				return;
3228 			}
3229 		}
3230 #endif
3231 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3232 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3233 		if (!so_locked) {
3234 			SCTP_SOCKET_UNLOCK(so, 1);
3235 		}
3236 #endif
3237 	}
3238 }
3239 
/*
 * Notify the application that the peer has sent a SHUTDOWN.  For
 * TCP-model (and TCP-pool) sockets the socket is first marked as unable
 * to send more, waking any writers; then, if subscribed, an
 * SCTP_SHUTDOWN_EVENT notification is queued to the receive buffer.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Socket lock must be taken before the TCB lock: hold a
		 * ref, drop the TCB lock, lock the socket, re-lock the TCB.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket went away while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3307 
3308 static void
3309 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3310     int so_locked
3311 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3312     SCTP_UNUSED
3313 #endif
3314 )
3315 {
3316 	struct mbuf *m_notify;
3317 	struct sctp_sender_dry_event *event;
3318 	struct sctp_queued_to_read *control;
3319 
3320 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3321 		/* event not enabled */
3322 		return;
3323 	}
3324 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3325 	if (m_notify == NULL) {
3326 		/* no space left */
3327 		return;
3328 	}
3329 	SCTP_BUF_LEN(m_notify) = 0;
3330 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3331 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3332 	event->sender_dry_flags = 0;
3333 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3334 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3335 
3336 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3337 	SCTP_BUF_NEXT(m_notify) = NULL;
3338 
3339 	/* append to socket */
3340 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3341 	    0, 0, 0, 0, 0, 0, m_notify);
3342 	if (control == NULL) {
3343 		/* no memory */
3344 		sctp_m_freem(m_notify);
3345 		return;
3346 	}
3347 	control->length = SCTP_BUF_LEN(m_notify);
3348 	control->spec_flags = M_NOTIFICATION;
3349 	/* not that we need this */
3350 	control->tail_mbuf = m_notify;
3351 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3352 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3353 }
3354 
3355 
3356 static void
3357 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3358 {
3359 	struct mbuf *m_notify;
3360 	struct sctp_queued_to_read *control;
3361 	struct sctp_stream_reset_event *strreset;
3362 	int len;
3363 
3364 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3365 		/* event not enabled */
3366 		return;
3367 	}
3368 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3369 	if (m_notify == NULL)
3370 		/* no space left */
3371 		return;
3372 	SCTP_BUF_LEN(m_notify) = 0;
3373 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3374 	if (len > M_TRAILINGSPACE(m_notify)) {
3375 		/* never enough room */
3376 		sctp_m_freem(m_notify);
3377 		return;
3378 	}
3379 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3380 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3381 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3382 	strreset->strreset_length = len;
3383 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3384 	strreset->strreset_list[0] = number_entries;
3385 
3386 	SCTP_BUF_LEN(m_notify) = len;
3387 	SCTP_BUF_NEXT(m_notify) = NULL;
3388 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3389 		/* no space */
3390 		sctp_m_freem(m_notify);
3391 		return;
3392 	}
3393 	/* append to socket */
3394 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3395 	    0, 0, 0, 0, 0, 0,
3396 	    m_notify);
3397 	if (control == NULL) {
3398 		/* no memory */
3399 		sctp_m_freem(m_notify);
3400 		return;
3401 	}
3402 	control->spec_flags = M_NOTIFICATION;
3403 	control->length = SCTP_BUF_LEN(m_notify);
3404 	/* not that we need this */
3405 	control->tail_mbuf = m_notify;
3406 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3407 	    control,
3408 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3409 }
3410 
3411 
3412 static void
3413 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3414     int number_entries, uint16_t * list, int flag)
3415 {
3416 	struct mbuf *m_notify;
3417 	struct sctp_queued_to_read *control;
3418 	struct sctp_stream_reset_event *strreset;
3419 	int len;
3420 
3421 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3422 		/* event not enabled */
3423 		return;
3424 	}
3425 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3426 	if (m_notify == NULL)
3427 		/* no space left */
3428 		return;
3429 	SCTP_BUF_LEN(m_notify) = 0;
3430 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3431 	if (len > M_TRAILINGSPACE(m_notify)) {
3432 		/* never enough room */
3433 		sctp_m_freem(m_notify);
3434 		return;
3435 	}
3436 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3437 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3438 	if (number_entries == 0) {
3439 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3440 	} else {
3441 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3442 	}
3443 	strreset->strreset_length = len;
3444 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3445 	if (number_entries) {
3446 		int i;
3447 
3448 		for (i = 0; i < number_entries; i++) {
3449 			strreset->strreset_list[i] = ntohs(list[i]);
3450 		}
3451 	}
3452 	SCTP_BUF_LEN(m_notify) = len;
3453 	SCTP_BUF_NEXT(m_notify) = NULL;
3454 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3455 		/* no space */
3456 		sctp_m_freem(m_notify);
3457 		return;
3458 	}
3459 	/* append to socket */
3460 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3461 	    0, 0, 0, 0, 0, 0,
3462 	    m_notify);
3463 	if (control == NULL) {
3464 		/* no memory */
3465 		sctp_m_freem(m_notify);
3466 		return;
3467 	}
3468 	control->spec_flags = M_NOTIFICATION;
3469 	control->length = SCTP_BUF_LEN(m_notify);
3470 	/* not that we need this */
3471 	control->tail_mbuf = m_notify;
3472 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3473 	    control,
3474 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3475 }
3476 
3477 
3478 void
3479 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3480     uint32_t error, void *data, int so_locked
3481 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3482     SCTP_UNUSED
3483 #endif
3484 )
3485 {
3486 	if ((stcb == NULL) ||
3487 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3488 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3489 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3490 		/* If the socket is gone we are out of here */
3491 		return;
3492 	}
3493 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3494 		return;
3495 	}
3496 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3497 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3498 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3499 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3500 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3501 			/* Don't report these in front states */
3502 			return;
3503 		}
3504 	}
3505 	switch (notification) {
3506 	case SCTP_NOTIFY_ASSOC_UP:
3507 		if (stcb->asoc.assoc_up_sent == 0) {
3508 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3509 			stcb->asoc.assoc_up_sent = 1;
3510 		}
3511 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3512 			sctp_notify_adaptation_layer(stcb, error);
3513 		}
3514 		if (stcb->asoc.peer_supports_auth == 0) {
3515 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3516 			    NULL, so_locked);
3517 		}
3518 		break;
3519 	case SCTP_NOTIFY_ASSOC_DOWN:
3520 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3521 		break;
3522 	case SCTP_NOTIFY_INTERFACE_DOWN:
3523 		{
3524 			struct sctp_nets *net;
3525 
3526 			net = (struct sctp_nets *)data;
3527 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3528 			    (struct sockaddr *)&net->ro._l_addr, error);
3529 			break;
3530 		}
3531 	case SCTP_NOTIFY_INTERFACE_UP:
3532 		{
3533 			struct sctp_nets *net;
3534 
3535 			net = (struct sctp_nets *)data;
3536 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3537 			    (struct sockaddr *)&net->ro._l_addr, error);
3538 			break;
3539 		}
3540 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3541 		{
3542 			struct sctp_nets *net;
3543 
3544 			net = (struct sctp_nets *)data;
3545 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3546 			    (struct sockaddr *)&net->ro._l_addr, error);
3547 			break;
3548 		}
3549 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3550 		sctp_notify_send_failed2(stcb, error,
3551 		    (struct sctp_stream_queue_pending *)data, so_locked);
3552 		break;
3553 	case SCTP_NOTIFY_DG_FAIL:
3554 		sctp_notify_send_failed(stcb, error,
3555 		    (struct sctp_tmit_chunk *)data, so_locked);
3556 		break;
3557 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3558 		{
3559 			uint32_t val;
3560 
3561 			val = *((uint32_t *) data);
3562 
3563 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3564 			break;
3565 		}
3566 	case SCTP_NOTIFY_STRDATA_ERR:
3567 		break;
3568 	case SCTP_NOTIFY_ASSOC_ABORTED:
3569 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3570 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3571 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3572 		} else {
3573 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3574 		}
3575 		break;
3576 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3577 		break;
3578 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3579 		break;
3580 	case SCTP_NOTIFY_ASSOC_RESTART:
3581 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3582 		if (stcb->asoc.peer_supports_auth == 0) {
3583 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3584 			    NULL, so_locked);
3585 		}
3586 		break;
3587 	case SCTP_NOTIFY_HB_RESP:
3588 		break;
3589 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3590 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3591 		break;
3592 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3593 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3594 		break;
3595 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3596 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3597 		break;
3598 
3599 	case SCTP_NOTIFY_STR_RESET_SEND:
3600 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3601 		break;
3602 	case SCTP_NOTIFY_STR_RESET_RECV:
3603 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3604 		break;
3605 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3606 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3607 		break;
3608 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3609 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3610 		break;
3611 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3612 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3613 		    error);
3614 		break;
3615 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3616 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3617 		    error);
3618 		break;
3619 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3620 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3621 		    error);
3622 		break;
3623 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3624 		break;
3625 	case SCTP_NOTIFY_ASCONF_FAILED:
3626 		break;
3627 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3628 		sctp_notify_shutdown_event(stcb);
3629 		break;
3630 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3631 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3632 		    (uint16_t) (uintptr_t) data,
3633 		    so_locked);
3634 		break;
3635 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3636 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3637 		    (uint16_t) (uintptr_t) data,
3638 		    so_locked);
3639 		break;
3640 	case SCTP_NOTIFY_NO_PEER_AUTH:
3641 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3642 		    (uint16_t) (uintptr_t) data,
3643 		    so_locked);
3644 		break;
3645 	case SCTP_NOTIFY_SENDER_DRY:
3646 		sctp_notify_sender_dry_event(stcb, so_locked);
3647 		break;
3648 	default:
3649 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3650 		    __FUNCTION__, notification, notification);
3651 		break;
3652 	}			/* end switch */
3653 }
3654 
3655 void
3656 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3657 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3658     SCTP_UNUSED
3659 #endif
3660 )
3661 {
3662 	struct sctp_association *asoc;
3663 	struct sctp_stream_out *outs;
3664 	struct sctp_tmit_chunk *chk;
3665 	struct sctp_stream_queue_pending *sp;
3666 	int i;
3667 
3668 	asoc = &stcb->asoc;
3669 
3670 	if (stcb == NULL) {
3671 		return;
3672 	}
3673 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3674 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3675 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3676 		return;
3677 	}
3678 	/* now through all the gunk freeing chunks */
3679 	if (holds_lock == 0) {
3680 		SCTP_TCB_SEND_LOCK(stcb);
3681 	}
3682 	/* sent queue SHOULD be empty */
3683 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3684 		chk = TAILQ_FIRST(&asoc->sent_queue);
3685 		while (chk) {
3686 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3687 			asoc->sent_queue_cnt--;
3688 			if (chk->data != NULL) {
3689 				sctp_free_bufspace(stcb, asoc, chk, 1);
3690 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3691 				    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3692 				if (chk->data) {
3693 					sctp_m_freem(chk->data);
3694 					chk->data = NULL;
3695 				}
3696 			}
3697 			sctp_free_a_chunk(stcb, chk);
3698 			/* sa_ignore FREED_MEMORY */
3699 			chk = TAILQ_FIRST(&asoc->sent_queue);
3700 		}
3701 	}
3702 	/* pending send queue SHOULD be empty */
3703 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3704 		chk = TAILQ_FIRST(&asoc->send_queue);
3705 		while (chk) {
3706 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3707 			asoc->send_queue_cnt--;
3708 			if (chk->data != NULL) {
3709 				sctp_free_bufspace(stcb, asoc, chk, 1);
3710 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3711 				    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3712 				if (chk->data) {
3713 					sctp_m_freem(chk->data);
3714 					chk->data = NULL;
3715 				}
3716 			}
3717 			sctp_free_a_chunk(stcb, chk);
3718 			/* sa_ignore FREED_MEMORY */
3719 			chk = TAILQ_FIRST(&asoc->send_queue);
3720 		}
3721 	}
3722 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3723 		/* For each stream */
3724 		outs = &stcb->asoc.strmout[i];
3725 		/* clean up any sends there */
3726 		stcb->asoc.locked_on_sending = NULL;
3727 		sp = TAILQ_FIRST(&outs->outqueue);
3728 		while (sp) {
3729 			stcb->asoc.stream_queue_cnt--;
3730 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3731 			sctp_free_spbufspace(stcb, asoc, sp);
3732 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3733 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3734 			if (sp->data) {
3735 				sctp_m_freem(sp->data);
3736 				sp->data = NULL;
3737 			}
3738 			if (sp->net)
3739 				sctp_free_remote_addr(sp->net);
3740 			sp->net = NULL;
3741 			/* Free the chunk */
3742 			sctp_free_a_strmoq(stcb, sp);
3743 			/* sa_ignore FREED_MEMORY */
3744 			sp = TAILQ_FIRST(&outs->outqueue);
3745 		}
3746 	}
3747 
3748 	if (holds_lock == 0) {
3749 		SCTP_TCB_SEND_UNLOCK(stcb);
3750 	}
3751 }
3752 
3753 void
3754 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3755 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3756     SCTP_UNUSED
3757 #endif
3758 )
3759 {
3760 
3761 	if (stcb == NULL) {
3762 		return;
3763 	}
3764 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3765 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3766 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3767 		return;
3768 	}
3769 	/* Tell them we lost the asoc */
3770 	sctp_report_all_outbound(stcb, 1, so_locked);
3771 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3772 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3773 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3774 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3775 	}
3776 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3777 }
3778 
/*
 * Abort an association in response to an inbound packet: notify the
 * ULP (if a TCB exists), send an ABORT to the peer using the peer's
 * vtag, then free the association.  'm'/'iphlen'/'sh' describe the
 * triggering packet; 'op_err' is an optional error-cause chunk that is
 * consumed by sctp_send_abort().  With no TCB, an ABORT is still sent
 * and a lingering socket-gone inp may be freed.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Socket lock must be taken before the TCB lock: hold a
		 * ref, drop the TCB lock, lock the socket, re-lock the TCB.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		/* no TCB: reap the inp if its socket is already gone */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3824 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the association's circular inbound and outbound
 * TSN logs (wrap-aware) via SCTP_PRINTF.  The entire body is compiled
 * only under NOSIY_PRINTS.
 *
 * NOTE(review): "NOSIY_PRINTS" looks like a typo of "NOISY_PRINTS" —
 * as written the body is dead code unless a build defines the
 * misspelled macro.  Left as-is since renaming it would change which
 * builds include this code; confirm intent before fixing.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* if wrapped, the entries after tsn_in_at are the oldest */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* same wrap-aware walk for the outbound log */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3888 
/*
 * Abort an association: mark it aborted, notify the ULP (unless the
 * socket is already gone), send an ABORT chunk to the peer, bump the
 * statistics, and free the association.  If stcb is NULL there is no
 * association to abort, but a pending endpoint free may still need to
 * be completed.
 *
 * 'error' is the reason reported to the ULP; 'op_err' (may be NULL) is
 * consumed as the ABORT's error-cause chain; 'so_locked' tells the
 * socket-locking variants whether the caller already holds the lock.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			/* socket gone and no assocs left: finish the inp free */
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* NOTE(review): vtag is captured here but never used below - verify */
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* an established association is going away: drop the gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * sctp_free_assoc() needs the socket lock.  Drop the TCB lock
	 * before taking it (lock ordering), holding a refcount so the
	 * stcb cannot be freed out from under us in between.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3954 
3955 void
3956 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3957     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3958 {
3959 	struct sctp_chunkhdr *ch, chunk_buf;
3960 	unsigned int chk_length;
3961 
3962 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3963 	/* Generate a TO address for future reference */
3964 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3965 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3966 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3967 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3968 		}
3969 	}
3970 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3971 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3972 	while (ch != NULL) {
3973 		chk_length = ntohs(ch->chunk_length);
3974 		if (chk_length < sizeof(*ch)) {
3975 			/* break to abort land */
3976 			break;
3977 		}
3978 		switch (ch->chunk_type) {
3979 		case SCTP_COOKIE_ECHO:
3980 			/* We hit here only if the assoc is being freed */
3981 			return;
3982 		case SCTP_PACKET_DROPPED:
3983 			/* we don't respond to pkt-dropped */
3984 			return;
3985 		case SCTP_ABORT_ASSOCIATION:
3986 			/* we don't respond with an ABORT to an ABORT */
3987 			return;
3988 		case SCTP_SHUTDOWN_COMPLETE:
3989 			/*
3990 			 * we ignore it since we are not waiting for it and
3991 			 * peer is gone
3992 			 */
3993 			return;
3994 		case SCTP_SHUTDOWN_ACK:
3995 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
3996 			return;
3997 		default:
3998 			break;
3999 		}
4000 		offset += SCTP_SIZE32(chk_length);
4001 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4002 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4003 	}
4004 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
4005 }
4006 
4007 /*
4008  * check the inbound datagram to make sure there is not an abort inside it,
4009  * if there is return 1, else return 0.
4010  */
4011 int
4012 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4013 {
4014 	struct sctp_chunkhdr *ch;
4015 	struct sctp_init_chunk *init_chk, chunk_buf;
4016 	int offset;
4017 	unsigned int chk_length;
4018 
4019 	offset = iphlen + sizeof(struct sctphdr);
4020 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4021 	    (uint8_t *) & chunk_buf);
4022 	while (ch != NULL) {
4023 		chk_length = ntohs(ch->chunk_length);
4024 		if (chk_length < sizeof(*ch)) {
4025 			/* packet is probably corrupt */
4026 			break;
4027 		}
4028 		/* we seem to be ok, is it an abort? */
4029 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4030 			/* yep, tell them */
4031 			return (1);
4032 		}
4033 		if (ch->chunk_type == SCTP_INITIATION) {
4034 			/* need to update the Vtag */
4035 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4036 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4037 			if (init_chk != NULL) {
4038 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4039 			}
4040 		}
4041 		/* Nope, move to the next chunk */
4042 		offset += SCTP_SIZE32(chk_length);
4043 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4044 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4045 	}
4046 	return (0);
4047 }
4048 
4049 /*
4050  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4051  * set (i.e. it's 0) so, create this function to compare link local scopes
4052  */
4053 #ifdef INET6
4054 uint32_t
4055 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4056 {
4057 	struct sockaddr_in6 a, b;
4058 
4059 	/* save copies */
4060 	a = *addr1;
4061 	b = *addr2;
4062 
4063 	if (a.sin6_scope_id == 0)
4064 		if (sa6_recoverscope(&a)) {
4065 			/* can't get scope, so can't match */
4066 			return (0);
4067 		}
4068 	if (b.sin6_scope_id == 0)
4069 		if (sa6_recoverscope(&b)) {
4070 			/* can't get scope, so can't match */
4071 			return (0);
4072 		}
4073 	if (a.sin6_scope_id != b.sin6_scope_id)
4074 		return (0);
4075 
4076 	return (1);
4077 }
4078 
4079 /*
4080  * returns a sockaddr_in6 with embedded scope recovered and removed
4081  */
4082 struct sockaddr_in6 *
4083 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4084 {
4085 	/* check and strip embedded scope junk */
4086 	if (addr->sin6_family == AF_INET6) {
4087 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4088 			if (addr->sin6_scope_id == 0) {
4089 				*store = *addr;
4090 				if (!sa6_recoverscope(store)) {
4091 					/* use the recovered scope */
4092 					addr = store;
4093 				}
4094 			} else {
4095 				/* else, return the original "to" addr */
4096 				in6_clearscope(&addr->sin6_addr);
4097 			}
4098 		}
4099 	}
4100 	return (addr);
4101 }
4102 
4103 #endif
4104 
4105 /*
4106  * are the two addresses the same?  currently a "scopeless" check returns: 1
4107  * if same, 0 if not
4108  */
4109 int
4110 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4111 {
4112 
4113 	/* must be valid */
4114 	if (sa1 == NULL || sa2 == NULL)
4115 		return (0);
4116 
4117 	/* must be the same family */
4118 	if (sa1->sa_family != sa2->sa_family)
4119 		return (0);
4120 
4121 	switch (sa1->sa_family) {
4122 #ifdef INET6
4123 	case AF_INET6:
4124 		{
4125 			/* IPv6 addresses */
4126 			struct sockaddr_in6 *sin6_1, *sin6_2;
4127 
4128 			sin6_1 = (struct sockaddr_in6 *)sa1;
4129 			sin6_2 = (struct sockaddr_in6 *)sa2;
4130 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4131 			    sin6_2));
4132 		}
4133 #endif
4134 	case AF_INET:
4135 		{
4136 			/* IPv4 addresses */
4137 			struct sockaddr_in *sin_1, *sin_2;
4138 
4139 			sin_1 = (struct sockaddr_in *)sa1;
4140 			sin_2 = (struct sockaddr_in *)sa2;
4141 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4142 		}
4143 	default:
4144 		/* we don't do these... */
4145 		return (0);
4146 	}
4147 }
4148 
4149 void
4150 sctp_print_address(struct sockaddr *sa)
4151 {
4152 #ifdef INET6
4153 	char ip6buf[INET6_ADDRSTRLEN];
4154 
4155 	ip6buf[0] = 0;
4156 #endif
4157 
4158 	switch (sa->sa_family) {
4159 #ifdef INET6
4160 	case AF_INET6:
4161 		{
4162 			struct sockaddr_in6 *sin6;
4163 
4164 			sin6 = (struct sockaddr_in6 *)sa;
4165 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4166 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4167 			    ntohs(sin6->sin6_port),
4168 			    sin6->sin6_scope_id);
4169 			break;
4170 		}
4171 #endif
4172 	case AF_INET:
4173 		{
4174 			struct sockaddr_in *sin;
4175 			unsigned char *p;
4176 
4177 			sin = (struct sockaddr_in *)sa;
4178 			p = (unsigned char *)&sin->sin_addr;
4179 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4180 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4181 			break;
4182 		}
4183 	default:
4184 		SCTP_PRINTF("?\n");
4185 		break;
4186 	}
4187 }
4188 
4189 void
4190 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4191 {
4192 	switch (iph->ip_v) {
4193 		case IPVERSION:
4194 		{
4195 			struct sockaddr_in lsa, fsa;
4196 
4197 			bzero(&lsa, sizeof(lsa));
4198 			lsa.sin_len = sizeof(lsa);
4199 			lsa.sin_family = AF_INET;
4200 			lsa.sin_addr = iph->ip_src;
4201 			lsa.sin_port = sh->src_port;
4202 			bzero(&fsa, sizeof(fsa));
4203 			fsa.sin_len = sizeof(fsa);
4204 			fsa.sin_family = AF_INET;
4205 			fsa.sin_addr = iph->ip_dst;
4206 			fsa.sin_port = sh->dest_port;
4207 			SCTP_PRINTF("src: ");
4208 			sctp_print_address((struct sockaddr *)&lsa);
4209 			SCTP_PRINTF("dest: ");
4210 			sctp_print_address((struct sockaddr *)&fsa);
4211 			break;
4212 		}
4213 #ifdef INET6
4214 	case IPV6_VERSION >> 4:
4215 		{
4216 			struct ip6_hdr *ip6;
4217 			struct sockaddr_in6 lsa6, fsa6;
4218 
4219 			ip6 = (struct ip6_hdr *)iph;
4220 			bzero(&lsa6, sizeof(lsa6));
4221 			lsa6.sin6_len = sizeof(lsa6);
4222 			lsa6.sin6_family = AF_INET6;
4223 			lsa6.sin6_addr = ip6->ip6_src;
4224 			lsa6.sin6_port = sh->src_port;
4225 			bzero(&fsa6, sizeof(fsa6));
4226 			fsa6.sin6_len = sizeof(fsa6);
4227 			fsa6.sin6_family = AF_INET6;
4228 			fsa6.sin6_addr = ip6->ip6_dst;
4229 			fsa6.sin6_port = sh->dest_port;
4230 			SCTP_PRINTF("src: ");
4231 			sctp_print_address((struct sockaddr *)&lsa6);
4232 			SCTP_PRINTF("dest: ");
4233 			sctp_print_address((struct sockaddr *)&fsa6);
4234 			break;
4235 		}
4236 #endif
4237 	default:
4238 		/* TSNH */
4239 		break;
4240 	}
4241 }
4242 
/*
 * Move every read-queue entry that belongs to 'stcb' from old_inp's
 * socket over to new_inp's socket (used on peeloff/accept), keeping
 * the socket-buffer accounting consistent on both sides.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all entries for our target stcb onto tmp_queue */
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* release the old socket's sb accounting for each mbuf */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge each mbuf to the new socket's receive buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4324 
4325 void
4326 sctp_add_to_readq(struct sctp_inpcb *inp,
4327     struct sctp_tcb *stcb,
4328     struct sctp_queued_to_read *control,
4329     struct sockbuf *sb,
4330     int end,
4331     int inp_read_lock_held,
4332     int so_locked
4333 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4334     SCTP_UNUSED
4335 #endif
4336 )
4337 {
4338 	/*
4339 	 * Here we must place the control on the end of the socket read
4340 	 * queue AND increment sb_cc so that select will work properly on
4341 	 * read.
4342 	 */
4343 	struct mbuf *m, *prev = NULL;
4344 
4345 	if (inp == NULL) {
4346 		/* Gak, TSNH!! */
4347 #ifdef INVARIANTS
4348 		panic("Gak, inp NULL on add_to_readq");
4349 #endif
4350 		return;
4351 	}
4352 	if (inp_read_lock_held == 0)
4353 		SCTP_INP_READ_LOCK(inp);
4354 	if (!(control->spec_flags & M_NOTIFICATION)) {
4355 		atomic_add_int(&inp->total_recvs, 1);
4356 		if (!control->do_not_ref_stcb) {
4357 			atomic_add_int(&stcb->total_recvs, 1);
4358 		}
4359 	}
4360 	m = control->data;
4361 	control->held_length = 0;
4362 	control->length = 0;
4363 	while (m) {
4364 		if (SCTP_BUF_LEN(m) == 0) {
4365 			/* Skip mbufs with NO length */
4366 			if (prev == NULL) {
4367 				/* First one */
4368 				control->data = sctp_m_free(m);
4369 				m = control->data;
4370 			} else {
4371 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4372 				m = SCTP_BUF_NEXT(prev);
4373 			}
4374 			if (m == NULL) {
4375 				control->tail_mbuf = prev;;
4376 			}
4377 			continue;
4378 		}
4379 		prev = m;
4380 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4381 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4382 		}
4383 		sctp_sballoc(stcb, sb, m);
4384 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4385 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4386 		}
4387 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4388 		m = SCTP_BUF_NEXT(m);
4389 	}
4390 	if (prev != NULL) {
4391 		control->tail_mbuf = prev;
4392 	} else {
4393 		/* Everything got collapsed out?? */
4394 		if (inp_read_lock_held == 0)
4395 			SCTP_INP_READ_UNLOCK(inp);
4396 		return;
4397 	}
4398 	if (end) {
4399 		control->end_added = 1;
4400 	}
4401 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4402 	if (inp_read_lock_held == 0)
4403 		SCTP_INP_READ_UNLOCK(inp);
4404 	if (inp && inp->sctp_socket) {
4405 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4406 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4407 		} else {
4408 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4409 			struct socket *so;
4410 
4411 			so = SCTP_INP_SO(inp);
4412 			if (!so_locked) {
4413 				atomic_add_int(&stcb->asoc.refcnt, 1);
4414 				SCTP_TCB_UNLOCK(stcb);
4415 				SCTP_SOCKET_LOCK(so, 1);
4416 				SCTP_TCB_LOCK(stcb);
4417 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4418 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4419 					SCTP_SOCKET_UNLOCK(so, 1);
4420 					return;
4421 				}
4422 			}
4423 #endif
4424 			sctp_sorwakeup(inp, inp->sctp_socket);
4425 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4426 			if (!so_locked) {
4427 				SCTP_SOCKET_UNLOCK(so, 1);
4428 			}
4429 #endif
4430 		}
4431 	}
4432 }
4433 
4434 
/*
 * Append mbuf chain 'm' to an existing read-queue entry (used during
 * partial delivery or reassembly).  Returns 0 on success, -1 if there
 * is no control, it was already completed, or the chain is empty.
 * When 'sb' is non-NULL the socket-buffer accounting is updated too.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			/* charge this mbuf to the receive buffer */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4576 
4577 
4578 
4579 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4580  *************ALTERNATE ROUTING CODE
4581  */
4582 
4583 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4584  *************ALTERNATE ROUTING CODE
4585  */
4586 
4587 struct mbuf *
4588 sctp_generate_invmanparam(int err)
4589 {
4590 	/* Return a MBUF with a invalid mandatory parameter */
4591 	struct mbuf *m;
4592 
4593 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4594 	if (m) {
4595 		struct sctp_paramhdr *ph;
4596 
4597 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4598 		ph = mtod(m, struct sctp_paramhdr *);
4599 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4600 		ph->param_type = htons(err);
4601 	}
4602 	return (m);
4603 }
4604 
4605 #ifdef SCTP_MBCNT_LOGGING
4606 void
4607 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4608     struct sctp_tmit_chunk *tp1, int chk_cnt)
4609 {
4610 	if (tp1->data == NULL) {
4611 		return;
4612 	}
4613 	asoc->chunks_on_out_queue -= chk_cnt;
4614 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4615 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4616 		    asoc->total_output_queue_size,
4617 		    tp1->book_size,
4618 		    0,
4619 		    tp1->mbcnt);
4620 	}
4621 	if (asoc->total_output_queue_size >= tp1->book_size) {
4622 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4623 	} else {
4624 		asoc->total_output_queue_size = 0;
4625 	}
4626 
4627 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4628 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4629 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4630 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4631 		} else {
4632 			stcb->sctp_socket->so_snd.sb_cc = 0;
4633 
4634 		}
4635 	}
4636 }
4637 
4638 #endif
4639 
4640 int
4641 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4642     int reason, int so_locked
4643 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4644     SCTP_UNUSED
4645 #endif
4646 )
4647 {
4648 	struct sctp_stream_out *strq;
4649 	struct sctp_tmit_chunk *chk = NULL;
4650 	struct sctp_stream_queue_pending *sp;
4651 	uint16_t stream = 0, seq = 0;
4652 	uint8_t foundeom = 0;
4653 	int ret_sz = 0;
4654 	int notdone;
4655 	int do_wakeup_routine = 0;
4656 
4657 	stream = tp1->rec.data.stream_number;
4658 	seq = tp1->rec.data.stream_seq;
4659 	do {
4660 		ret_sz += tp1->book_size;
4661 		if (tp1->data != NULL) {
4662 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4663 				sctp_flight_size_decrease(tp1);
4664 				sctp_total_flight_decrease(stcb, tp1);
4665 			}
4666 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4667 			stcb->asoc.peers_rwnd += tp1->send_size;
4668 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4669 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4670 			sctp_m_freem(tp1->data);
4671 			tp1->data = NULL;
4672 			do_wakeup_routine = 1;
4673 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4674 				stcb->asoc.sent_queue_cnt_removeable--;
4675 			}
4676 		}
4677 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4678 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4679 		    SCTP_DATA_NOT_FRAG) {
4680 			/* not frag'ed we ae done   */
4681 			notdone = 0;
4682 			foundeom = 1;
4683 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4684 			/* end of frag, we are done */
4685 			notdone = 0;
4686 			foundeom = 1;
4687 		} else {
4688 			/*
4689 			 * Its a begin or middle piece, we must mark all of
4690 			 * it
4691 			 */
4692 			notdone = 1;
4693 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4694 		}
4695 	} while (tp1 && notdone);
4696 	if (foundeom == 0) {
4697 		/*
4698 		 * The multi-part message was scattered across the send and
4699 		 * sent queue.
4700 		 */
4701 next_on_sent:
4702 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4703 		/*
4704 		 * recurse throught the send_queue too, starting at the
4705 		 * beginning.
4706 		 */
4707 		if ((tp1) &&
4708 		    (tp1->rec.data.stream_number == stream) &&
4709 		    (tp1->rec.data.stream_seq == seq)
4710 		    ) {
4711 			/*
4712 			 * save to chk in case we have some on stream out
4713 			 * queue. If so and we have an un-transmitted one we
4714 			 * don't have to fudge the TSN.
4715 			 */
4716 			chk = tp1;
4717 			ret_sz += tp1->book_size;
4718 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4719 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4720 			sctp_m_freem(tp1->data);
4721 			/* No flight involved here book the size to 0 */
4722 			tp1->book_size = 0;
4723 			tp1->data = NULL;
4724 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4725 				foundeom = 1;
4726 			}
4727 			do_wakeup_routine = 1;
4728 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4729 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4730 			/*
4731 			 * on to the sent queue so we can wait for it to be
4732 			 * passed by.
4733 			 */
4734 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4735 			    sctp_next);
4736 			stcb->asoc.send_queue_cnt--;
4737 			stcb->asoc.sent_queue_cnt++;
4738 			goto next_on_sent;
4739 		}
4740 	}
4741 	if (foundeom == 0) {
4742 		/*
4743 		 * Still no eom found. That means there is stuff left on the
4744 		 * stream out queue.. yuck.
4745 		 */
4746 		strq = &stcb->asoc.strmout[stream];
4747 		SCTP_TCB_SEND_LOCK(stcb);
4748 		sp = TAILQ_FIRST(&strq->outqueue);
4749 		while (sp->strseq <= seq) {
4750 			/* Check if its our SEQ */
4751 			if (sp->strseq == seq) {
4752 				sp->discard_rest = 1;
4753 				/*
4754 				 * We may need to put a chunk on the queue
4755 				 * that holds the TSN that would have been
4756 				 * sent with the LAST bit.
4757 				 */
4758 				if (chk == NULL) {
4759 					/* Yep, we have to */
4760 					sctp_alloc_a_chunk(stcb, chk);
4761 					if (chk == NULL) {
4762 						/*
4763 						 * we are hosed. All we can
4764 						 * do is nothing.. which
4765 						 * will cause an abort if
4766 						 * the peer is paying
4767 						 * attention.
4768 						 */
4769 						goto oh_well;
4770 					}
4771 					memset(chk, 0, sizeof(*chk));
4772 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4773 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4774 					chk->asoc = &stcb->asoc;
4775 					chk->rec.data.stream_seq = sp->strseq;
4776 					chk->rec.data.stream_number = sp->stream;
4777 					chk->rec.data.payloadtype = sp->ppid;
4778 					chk->rec.data.context = sp->context;
4779 					chk->flags = sp->act_flags;
4780 					chk->addr_over = sp->addr_over;
4781 					chk->whoTo = sp->net;
4782 					atomic_add_int(&chk->whoTo->ref_count, 1);
4783 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4784 					stcb->asoc.pr_sctp_cnt++;
4785 					chk->pr_sctp_on = 1;
4786 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4787 					stcb->asoc.sent_queue_cnt++;
4788 					stcb->asoc.pr_sctp_cnt++;
4789 				} else {
4790 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4791 				}
4792 		oh_well:
4793 				if (sp->data) {
4794 					/*
4795 					 * Pull any data to free up the SB
4796 					 * and allow sender to "add more"
4797 					 * whilc we will throw away :-)
4798 					 */
4799 					sctp_free_spbufspace(stcb, &stcb->asoc,
4800 					    sp);
4801 					ret_sz += sp->length;
4802 					do_wakeup_routine = 1;
4803 					sp->some_taken = 1;
4804 					sctp_m_freem(sp->data);
4805 					sp->length = 0;
4806 					sp->data = NULL;
4807 					sp->tail_mbuf = NULL;
4808 				}
4809 				break;
4810 			} else {
4811 				/* Next one please */
4812 				sp = TAILQ_NEXT(sp, next);
4813 			}
4814 		}		/* End while */
4815 		SCTP_TCB_SEND_UNLOCK(stcb);
4816 	}
4817 	if (do_wakeup_routine) {
4818 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4819 		struct socket *so;
4820 
4821 		so = SCTP_INP_SO(stcb->sctp_ep);
4822 		if (!so_locked) {
4823 			atomic_add_int(&stcb->asoc.refcnt, 1);
4824 			SCTP_TCB_UNLOCK(stcb);
4825 			SCTP_SOCKET_LOCK(so, 1);
4826 			SCTP_TCB_LOCK(stcb);
4827 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4828 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4829 				/* assoc was freed while we were unlocked */
4830 				SCTP_SOCKET_UNLOCK(so, 1);
4831 				return (ret_sz);
4832 			}
4833 		}
4834 #endif
4835 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4836 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4837 		if (!so_locked) {
4838 			SCTP_SOCKET_UNLOCK(so, 1);
4839 		}
4840 #endif
4841 	}
4842 	return (ret_sz);
4843 }
4844 
4845 /*
4846  * checks to see if the given address, sa, is one that is currently known by
4847  * the kernel note: can't distinguish the same address on multiple interfaces
4848  * and doesn't handle multiple addresses with different zone/scope id's note:
4849  * ifa_ifwithaddr() compares the entire sockaddr struct
4850  */
4851 struct sctp_ifa *
4852 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4853     int holds_lock)
4854 {
4855 	struct sctp_laddr *laddr;
4856 
4857 	if (holds_lock == 0) {
4858 		SCTP_INP_RLOCK(inp);
4859 	}
4860 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4861 		if (laddr->ifa == NULL)
4862 			continue;
4863 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4864 			continue;
4865 		if (addr->sa_family == AF_INET) {
4866 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4867 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4868 				/* found him. */
4869 				if (holds_lock == 0) {
4870 					SCTP_INP_RUNLOCK(inp);
4871 				}
4872 				return (laddr->ifa);
4873 				break;
4874 			}
4875 		}
4876 #ifdef INET6
4877 		if (addr->sa_family == AF_INET6) {
4878 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4879 			    &laddr->ifa->address.sin6)) {
4880 				/* found him. */
4881 				if (holds_lock == 0) {
4882 					SCTP_INP_RUNLOCK(inp);
4883 				}
4884 				return (laddr->ifa);
4885 				break;
4886 			}
4887 		}
4888 #endif
4889 	}
4890 	if (holds_lock == 0) {
4891 		SCTP_INP_RUNLOCK(inp);
4892 	}
4893 	return (NULL);
4894 }
4895 
4896 uint32_t
4897 sctp_get_ifa_hash_val(struct sockaddr *addr)
4898 {
4899 	if (addr->sa_family == AF_INET) {
4900 		struct sockaddr_in *sin;
4901 
4902 		sin = (struct sockaddr_in *)addr;
4903 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4904 	} else if (addr->sa_family == AF_INET6) {
4905 		struct sockaddr_in6 *sin6;
4906 		uint32_t hash_of_addr;
4907 
4908 		sin6 = (struct sockaddr_in6 *)addr;
4909 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4910 		    sin6->sin6_addr.s6_addr32[1] +
4911 		    sin6->sin6_addr.s6_addr32[2] +
4912 		    sin6->sin6_addr.s6_addr32[3]);
4913 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4914 		return (hash_of_addr);
4915 	}
4916 	return (0);
4917 }
4918 
4919 struct sctp_ifa *
4920 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4921 {
4922 	struct sctp_ifa *sctp_ifap;
4923 	struct sctp_vrf *vrf;
4924 	struct sctp_ifalist *hash_head;
4925 	uint32_t hash_of_addr;
4926 
4927 	if (holds_lock == 0)
4928 		SCTP_IPI_ADDR_RLOCK();
4929 
4930 	vrf = sctp_find_vrf(vrf_id);
4931 	if (vrf == NULL) {
4932 stage_right:
4933 		if (holds_lock == 0)
4934 			SCTP_IPI_ADDR_RUNLOCK();
4935 		return (NULL);
4936 	}
4937 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4938 
4939 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4940 	if (hash_head == NULL) {
4941 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4942 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4943 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4944 		sctp_print_address(addr);
4945 		SCTP_PRINTF("No such bucket for address\n");
4946 		if (holds_lock == 0)
4947 			SCTP_IPI_ADDR_RUNLOCK();
4948 
4949 		return (NULL);
4950 	}
4951 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4952 		if (sctp_ifap == NULL) {
4953 #ifdef INVARIANTS
4954 			panic("Huh LIST_FOREACH corrupt");
4955 			goto stage_right;
4956 #else
4957 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4958 			goto stage_right;
4959 #endif
4960 		}
4961 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4962 			continue;
4963 		if (addr->sa_family == AF_INET) {
4964 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4965 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4966 				/* found him. */
4967 				if (holds_lock == 0)
4968 					SCTP_IPI_ADDR_RUNLOCK();
4969 				return (sctp_ifap);
4970 				break;
4971 			}
4972 		}
4973 #ifdef INET6
4974 		if (addr->sa_family == AF_INET6) {
4975 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4976 			    &sctp_ifap->address.sin6)) {
4977 				/* found him. */
4978 				if (holds_lock == 0)
4979 					SCTP_IPI_ADDR_RUNLOCK();
4980 				return (sctp_ifap);
4981 				break;
4982 			}
4983 		}
4984 #endif
4985 	}
4986 	if (holds_lock == 0)
4987 		SCTP_IPI_ADDR_RUNLOCK();
4988 	return (NULL);
4989 }
4990 
/*
 * Called after the user has pulled data off the socket, to decide
 * whether the receive window has opened far enough to warrant sending
 * a window-update SACK to the peer.
 *
 * stcb         - association; may be NULL, in which case nothing is done.
 * freed_so_far - in/out count of bytes freed since the last update; it
 *                is folded into stcb->freed_by_sorcv_sincelast and
 *                zeroed here.
 * hold_rlock   - non-zero if the caller holds the INP read-queue lock;
 *                it is dropped around the SACK send and re-taken on exit.
 * rwnd_req     - threshold (bytes) the rwnd must have grown by before a
 *                window-update SACK is worth sending.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the assoc cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Also pin the endpoint; dropped at 'out'. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Accumulate the caller's freed byte count, then reset it. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window grew enough: send a window-update SACK. */
		if (hold_rlock) {
			/* Drop the read-queue lock before taking the TCB lock. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check the state now that we hold the TCB lock. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		/*
		 * EY if nr_sacks used then send an nr-sack , a sack
		 * otherwise
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
			sctp_send_nr_sack(stcb);
		else
			sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-take the read-queue lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5079 
5080 int
5081 sctp_sorecvmsg(struct socket *so,
5082     struct uio *uio,
5083     struct mbuf **mp,
5084     struct sockaddr *from,
5085     int fromlen,
5086     int *msg_flags,
5087     struct sctp_sndrcvinfo *sinfo,
5088     int filling_sinfo)
5089 {
5090 	/*
5091 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5092 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5093 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5094 	 * On the way out we may send out any combination of:
5095 	 * MSG_NOTIFICATION MSG_EOR
5096 	 *
5097 	 */
5098 	struct sctp_inpcb *inp = NULL;
5099 	int my_len = 0;
5100 	int cp_len = 0, error = 0;
5101 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5102 	struct mbuf *m = NULL, *embuf = NULL;
5103 	struct sctp_tcb *stcb = NULL;
5104 	int wakeup_read_socket = 0;
5105 	int freecnt_applied = 0;
5106 	int out_flags = 0, in_flags = 0;
5107 	int block_allowed = 1;
5108 	uint32_t freed_so_far = 0;
5109 	uint32_t copied_so_far = 0;
5110 	int in_eeor_mode = 0;
5111 	int no_rcv_needed = 0;
5112 	uint32_t rwnd_req = 0;
5113 	int hold_sblock = 0;
5114 	int hold_rlock = 0;
5115 	int slen = 0;
5116 	uint32_t held_length = 0;
5117 	int sockbuf_lock = 0;
5118 
5119 	if (uio == NULL) {
5120 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5121 		return (EINVAL);
5122 	}
5123 	if (msg_flags) {
5124 		in_flags = *msg_flags;
5125 		if (in_flags & MSG_PEEK)
5126 			SCTP_STAT_INCR(sctps_read_peeks);
5127 	} else {
5128 		in_flags = 0;
5129 	}
5130 	slen = uio->uio_resid;
5131 
5132 	/* Pull in and set up our int flags */
5133 	if (in_flags & MSG_OOB) {
5134 		/* Out of band's NOT supported */
5135 		return (EOPNOTSUPP);
5136 	}
5137 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5138 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5139 		return (EINVAL);
5140 	}
5141 	if ((in_flags & (MSG_DONTWAIT
5142 	    | MSG_NBIO
5143 	    )) ||
5144 	    SCTP_SO_IS_NBIO(so)) {
5145 		block_allowed = 0;
5146 	}
5147 	/* setup the endpoint */
5148 	inp = (struct sctp_inpcb *)so->so_pcb;
5149 	if (inp == NULL) {
5150 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5151 		return (EFAULT);
5152 	}
5153 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5154 	/* Must be at least a MTU's worth */
5155 	if (rwnd_req < SCTP_MIN_RWND)
5156 		rwnd_req = SCTP_MIN_RWND;
5157 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5158 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5159 		sctp_misc_ints(SCTP_SORECV_ENTER,
5160 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5161 	}
5162 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5163 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5164 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5165 	}
5166 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5167 	sockbuf_lock = 1;
5168 	if (error) {
5169 		goto release_unlocked;
5170 	}
5171 restart:
5172 
5173 
5174 restart_nosblocks:
5175 	if (hold_sblock == 0) {
5176 		SOCKBUF_LOCK(&so->so_rcv);
5177 		hold_sblock = 1;
5178 	}
5179 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5180 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5181 		goto out;
5182 	}
5183 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5184 		if (so->so_error) {
5185 			error = so->so_error;
5186 			if ((in_flags & MSG_PEEK) == 0)
5187 				so->so_error = 0;
5188 			goto out;
5189 		} else {
5190 			if (so->so_rcv.sb_cc == 0) {
5191 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5192 				/* indicate EOF */
5193 				error = 0;
5194 				goto out;
5195 			}
5196 		}
5197 	}
5198 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5199 		/* we need to wait for data */
5200 		if ((so->so_rcv.sb_cc == 0) &&
5201 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5202 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5203 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5204 				/*
5205 				 * For active open side clear flags for
5206 				 * re-use passive open is blocked by
5207 				 * connect.
5208 				 */
5209 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5210 					/*
5211 					 * You were aborted, passive side
5212 					 * always hits here
5213 					 */
5214 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5215 					error = ECONNRESET;
5216 					/*
5217 					 * You get this once if you are
5218 					 * active open side
5219 					 */
5220 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5221 						/*
5222 						 * Remove flag if on the
5223 						 * active open side
5224 						 */
5225 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5226 					}
5227 				}
5228 				so->so_state &= ~(SS_ISCONNECTING |
5229 				    SS_ISDISCONNECTING |
5230 				    SS_ISCONFIRMING |
5231 				    SS_ISCONNECTED);
5232 				if (error == 0) {
5233 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5234 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5235 						error = ENOTCONN;
5236 					} else {
5237 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5238 					}
5239 				}
5240 				goto out;
5241 			}
5242 		}
5243 		error = sbwait(&so->so_rcv);
5244 		if (error) {
5245 			goto out;
5246 		}
5247 		held_length = 0;
5248 		goto restart_nosblocks;
5249 	} else if (so->so_rcv.sb_cc == 0) {
5250 		if (so->so_error) {
5251 			error = so->so_error;
5252 			if ((in_flags & MSG_PEEK) == 0)
5253 				so->so_error = 0;
5254 		} else {
5255 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5256 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5257 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5258 					/*
5259 					 * For active open side clear flags
5260 					 * for re-use passive open is
5261 					 * blocked by connect.
5262 					 */
5263 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5264 						/*
5265 						 * You were aborted, passive
5266 						 * side always hits here
5267 						 */
5268 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5269 						error = ECONNRESET;
5270 						/*
5271 						 * You get this once if you
5272 						 * are active open side
5273 						 */
5274 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5275 							/*
5276 							 * Remove flag if on
5277 							 * the active open
5278 							 * side
5279 							 */
5280 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5281 						}
5282 					}
5283 					so->so_state &= ~(SS_ISCONNECTING |
5284 					    SS_ISDISCONNECTING |
5285 					    SS_ISCONFIRMING |
5286 					    SS_ISCONNECTED);
5287 					if (error == 0) {
5288 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5289 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5290 							error = ENOTCONN;
5291 						} else {
5292 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5293 						}
5294 					}
5295 					goto out;
5296 				}
5297 			}
5298 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5299 			error = EWOULDBLOCK;
5300 		}
5301 		goto out;
5302 	}
5303 	if (hold_sblock == 1) {
5304 		SOCKBUF_UNLOCK(&so->so_rcv);
5305 		hold_sblock = 0;
5306 	}
5307 	/* we possibly have data we can read */
5308 	/* sa_ignore FREED_MEMORY */
5309 	control = TAILQ_FIRST(&inp->read_queue);
5310 	if (control == NULL) {
5311 		/*
5312 		 * This could be happening since the appender did the
5313 		 * increment but as not yet did the tailq insert onto the
5314 		 * read_queue
5315 		 */
5316 		if (hold_rlock == 0) {
5317 			SCTP_INP_READ_LOCK(inp);
5318 			hold_rlock = 1;
5319 		}
5320 		control = TAILQ_FIRST(&inp->read_queue);
5321 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5322 #ifdef INVARIANTS
5323 			panic("Huh, its non zero and nothing on control?");
5324 #endif
5325 			so->so_rcv.sb_cc = 0;
5326 		}
5327 		SCTP_INP_READ_UNLOCK(inp);
5328 		hold_rlock = 0;
5329 		goto restart;
5330 	}
5331 	if ((control->length == 0) &&
5332 	    (control->do_not_ref_stcb)) {
5333 		/*
5334 		 * Clean up code for freeing assoc that left behind a
5335 		 * pdapi.. maybe a peer in EEOR that just closed after
5336 		 * sending and never indicated a EOR.
5337 		 */
5338 		if (hold_rlock == 0) {
5339 			hold_rlock = 1;
5340 			SCTP_INP_READ_LOCK(inp);
5341 		}
5342 		control->held_length = 0;
5343 		if (control->data) {
5344 			/* Hmm there is data here .. fix */
5345 			struct mbuf *m_tmp;
5346 			int cnt = 0;
5347 
5348 			m_tmp = control->data;
5349 			while (m_tmp) {
5350 				cnt += SCTP_BUF_LEN(m_tmp);
5351 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5352 					control->tail_mbuf = m_tmp;
5353 					control->end_added = 1;
5354 				}
5355 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5356 			}
5357 			control->length = cnt;
5358 		} else {
5359 			/* remove it */
5360 			TAILQ_REMOVE(&inp->read_queue, control, next);
5361 			/* Add back any hiddend data */
5362 			sctp_free_remote_addr(control->whoFrom);
5363 			sctp_free_a_readq(stcb, control);
5364 		}
5365 		if (hold_rlock) {
5366 			hold_rlock = 0;
5367 			SCTP_INP_READ_UNLOCK(inp);
5368 		}
5369 		goto restart;
5370 	}
5371 	if ((control->length == 0) &&
5372 	    (control->end_added == 1)) {
5373 		/*
5374 		 * Do we also need to check for (control->pdapi_aborted ==
5375 		 * 1)?
5376 		 */
5377 		if (hold_rlock == 0) {
5378 			hold_rlock = 1;
5379 			SCTP_INP_READ_LOCK(inp);
5380 		}
5381 		TAILQ_REMOVE(&inp->read_queue, control, next);
5382 		if (control->data) {
5383 #ifdef INVARIANTS
5384 			panic("control->data not null but control->length == 0");
5385 #else
5386 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5387 			sctp_m_freem(control->data);
5388 			control->data = NULL;
5389 #endif
5390 		}
5391 		if (control->aux_data) {
5392 			sctp_m_free(control->aux_data);
5393 			control->aux_data = NULL;
5394 		}
5395 		sctp_free_remote_addr(control->whoFrom);
5396 		sctp_free_a_readq(stcb, control);
5397 		if (hold_rlock) {
5398 			hold_rlock = 0;
5399 			SCTP_INP_READ_UNLOCK(inp);
5400 		}
5401 		goto restart;
5402 	}
5403 	if (control->length == 0) {
5404 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5405 		    (filling_sinfo)) {
5406 			/* find a more suitable one then this */
5407 			ctl = TAILQ_NEXT(control, next);
5408 			while (ctl) {
5409 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5410 				    (ctl->some_taken ||
5411 				    (ctl->spec_flags & M_NOTIFICATION) ||
5412 				    ((ctl->do_not_ref_stcb == 0) &&
5413 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5414 				    ) {
5415 					/*-
5416 					 * If we have a different TCB next, and there is data
5417 					 * present. If we have already taken some (pdapi), OR we can
5418 					 * ref the tcb and no delivery as started on this stream, we
5419 					 * take it. Note we allow a notification on a different
5420 					 * assoc to be delivered..
5421 					 */
5422 					control = ctl;
5423 					goto found_one;
5424 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5425 					    (ctl->length) &&
5426 					    ((ctl->some_taken) ||
5427 					    ((ctl->do_not_ref_stcb == 0) &&
5428 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5429 					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5430 				    ) {
5431 					/*-
5432 					 * If we have the same tcb, and there is data present, and we
5433 					 * have the strm interleave feature present. Then if we have
5434 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5435 					 * not started a delivery for this stream, we can take it.
5436 					 * Note we do NOT allow a notificaiton on the same assoc to
5437 					 * be delivered.
5438 					 */
5439 					control = ctl;
5440 					goto found_one;
5441 				}
5442 				ctl = TAILQ_NEXT(ctl, next);
5443 			}
5444 		}
5445 		/*
5446 		 * if we reach here, not suitable replacement is available
5447 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5448 		 * into the our held count, and its time to sleep again.
5449 		 */
5450 		held_length = so->so_rcv.sb_cc;
5451 		control->held_length = so->so_rcv.sb_cc;
5452 		goto restart;
5453 	}
5454 	/* Clear the held length since there is something to read */
5455 	control->held_length = 0;
5456 	if (hold_rlock) {
5457 		SCTP_INP_READ_UNLOCK(inp);
5458 		hold_rlock = 0;
5459 	}
5460 found_one:
5461 	/*
5462 	 * If we reach here, control has a some data for us to read off.
5463 	 * Note that stcb COULD be NULL.
5464 	 */
5465 	control->some_taken++;
5466 	if (hold_sblock) {
5467 		SOCKBUF_UNLOCK(&so->so_rcv);
5468 		hold_sblock = 0;
5469 	}
5470 	stcb = control->stcb;
5471 	if (stcb) {
5472 		if ((control->do_not_ref_stcb == 0) &&
5473 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5474 			if (freecnt_applied == 0)
5475 				stcb = NULL;
5476 		} else if (control->do_not_ref_stcb == 0) {
5477 			/* you can't free it on me please */
5478 			/*
5479 			 * The lock on the socket buffer protects us so the
5480 			 * free code will stop. But since we used the
5481 			 * socketbuf lock and the sender uses the tcb_lock
5482 			 * to increment, we need to use the atomic add to
5483 			 * the refcnt
5484 			 */
5485 			if (freecnt_applied) {
5486 #ifdef INVARIANTS
5487 				panic("refcnt already incremented");
5488 #else
5489 				printf("refcnt already incremented?\n");
5490 #endif
5491 			} else {
5492 				atomic_add_int(&stcb->asoc.refcnt, 1);
5493 				freecnt_applied = 1;
5494 			}
5495 			/*
5496 			 * Setup to remember how much we have not yet told
5497 			 * the peer our rwnd has opened up. Note we grab the
5498 			 * value from the tcb from last time. Note too that
5499 			 * sack sending clears this when a sack is sent,
5500 			 * which is fine. Once we hit the rwnd_req, we then
5501 			 * will go to the sctp_user_rcvd() that will not
5502 			 * lock until it KNOWs it MUST send a WUP-SACK.
5503 			 */
5504 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5505 			stcb->freed_by_sorcv_sincelast = 0;
5506 		}
5507 	}
5508 	if (stcb &&
5509 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5510 	    control->do_not_ref_stcb == 0) {
5511 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5512 	}
5513 	/* First lets get off the sinfo and sockaddr info */
5514 	if ((sinfo) && filling_sinfo) {
5515 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5516 		nxt = TAILQ_NEXT(control, next);
5517 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5518 			struct sctp_extrcvinfo *s_extra;
5519 
5520 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5521 			if ((nxt) &&
5522 			    (nxt->length)) {
5523 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5524 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5525 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5526 				}
5527 				if (nxt->spec_flags & M_NOTIFICATION) {
5528 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5529 				}
5530 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5531 				s_extra->sreinfo_next_length = nxt->length;
5532 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5533 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5534 				if (nxt->tail_mbuf != NULL) {
5535 					if (nxt->end_added) {
5536 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5537 					}
5538 				}
5539 			} else {
5540 				/*
5541 				 * we explicitly 0 this, since the memcpy
5542 				 * got some other things beyond the older
5543 				 * sinfo_ that is on the control's structure
5544 				 * :-D
5545 				 */
5546 				nxt = NULL;
5547 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5548 				s_extra->sreinfo_next_aid = 0;
5549 				s_extra->sreinfo_next_length = 0;
5550 				s_extra->sreinfo_next_ppid = 0;
5551 				s_extra->sreinfo_next_stream = 0;
5552 			}
5553 		}
5554 		/*
5555 		 * update off the real current cum-ack, if we have an stcb.
5556 		 */
5557 		if ((control->do_not_ref_stcb == 0) && stcb)
5558 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5559 		/*
5560 		 * mask off the high bits, we keep the actual chunk bits in
5561 		 * there.
5562 		 */
5563 		sinfo->sinfo_flags &= 0x00ff;
5564 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5565 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5566 		}
5567 	}
5568 #ifdef SCTP_ASOCLOG_OF_TSNS
5569 	{
5570 		int index, newindex;
5571 		struct sctp_pcbtsn_rlog *entry;
5572 
5573 		do {
5574 			index = inp->readlog_index;
5575 			newindex = index + 1;
5576 			if (newindex >= SCTP_READ_LOG_SIZE) {
5577 				newindex = 0;
5578 			}
5579 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5580 		entry = &inp->readlog[index];
5581 		entry->vtag = control->sinfo_assoc_id;
5582 		entry->strm = control->sinfo_stream;
5583 		entry->seq = control->sinfo_ssn;
5584 		entry->sz = control->length;
5585 		entry->flgs = control->sinfo_flags;
5586 	}
5587 #endif
5588 	if (fromlen && from) {
5589 		struct sockaddr *to;
5590 
5591 #ifdef INET
5592 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5593 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5594 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5595 #else
5596 		/* No AF_INET use AF_INET6 */
5597 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5598 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5599 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5600 #endif
5601 
5602 		to = from;
5603 #if defined(INET) && defined(INET6)
5604 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5605 		    (to->sa_family == AF_INET) &&
5606 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5607 			struct sockaddr_in *sin;
5608 			struct sockaddr_in6 sin6;
5609 
5610 			sin = (struct sockaddr_in *)to;
5611 			bzero(&sin6, sizeof(sin6));
5612 			sin6.sin6_family = AF_INET6;
5613 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5614 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5615 			bcopy(&sin->sin_addr,
5616 			    &sin6.sin6_addr.s6_addr32[3],
5617 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5618 			sin6.sin6_port = sin->sin_port;
5619 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5620 		}
5621 #endif
5622 #if defined(INET6)
5623 		{
5624 			struct sockaddr_in6 lsa6, *to6;
5625 
5626 			to6 = (struct sockaddr_in6 *)to;
5627 			sctp_recover_scope_mac(to6, (&lsa6));
5628 		}
5629 #endif
5630 	}
5631 	/* now copy out what data we can */
5632 	if (mp == NULL) {
5633 		/* copy out each mbuf in the chain up to length */
5634 get_more_data:
5635 		m = control->data;
5636 		while (m) {
5637 			/* Move out all we can */
5638 			cp_len = (int)uio->uio_resid;
5639 			my_len = (int)SCTP_BUF_LEN(m);
5640 			if (cp_len > my_len) {
5641 				/* not enough in this buf */
5642 				cp_len = my_len;
5643 			}
5644 			if (hold_rlock) {
5645 				SCTP_INP_READ_UNLOCK(inp);
5646 				hold_rlock = 0;
5647 			}
5648 			if (cp_len > 0)
5649 				error = uiomove(mtod(m, char *), cp_len, uio);
5650 			/* re-read */
5651 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5652 				goto release;
5653 			}
5654 			if ((control->do_not_ref_stcb == 0) && stcb &&
5655 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5656 				no_rcv_needed = 1;
5657 			}
5658 			if (error) {
5659 				/* error we are out of here */
5660 				goto release;
5661 			}
5662 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5663 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5664 			    ((control->end_added == 0) ||
5665 			    (control->end_added &&
5666 			    (TAILQ_NEXT(control, next) == NULL)))
5667 			    ) {
5668 				SCTP_INP_READ_LOCK(inp);
5669 				hold_rlock = 1;
5670 			}
5671 			if (cp_len == SCTP_BUF_LEN(m)) {
5672 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5673 				    (control->end_added)) {
5674 					out_flags |= MSG_EOR;
5675 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5676 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5677 				}
5678 				if (control->spec_flags & M_NOTIFICATION) {
5679 					out_flags |= MSG_NOTIFICATION;
5680 				}
5681 				/* we ate up the mbuf */
5682 				if (in_flags & MSG_PEEK) {
5683 					/* just looking */
5684 					m = SCTP_BUF_NEXT(m);
5685 					copied_so_far += cp_len;
5686 				} else {
5687 					/* dispose of the mbuf */
5688 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5689 						sctp_sblog(&so->so_rcv,
5690 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5691 					}
5692 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5693 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5694 						sctp_sblog(&so->so_rcv,
5695 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5696 					}
5697 					embuf = m;
5698 					copied_so_far += cp_len;
5699 					freed_so_far += cp_len;
5700 					freed_so_far += MSIZE;
5701 					atomic_subtract_int(&control->length, cp_len);
5702 					control->data = sctp_m_free(m);
5703 					m = control->data;
5704 					/*
5705 					 * been through it all, must hold sb
5706 					 * lock ok to null tail
5707 					 */
5708 					if (control->data == NULL) {
5709 #ifdef INVARIANTS
5710 						if ((control->end_added == 0) ||
5711 						    (TAILQ_NEXT(control, next) == NULL)) {
5712 							/*
5713 							 * If the end is not
5714 							 * added, OR the
5715 							 * next is NOT null
5716 							 * we MUST have the
5717 							 * lock.
5718 							 */
5719 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5720 								panic("Hmm we don't own the lock?");
5721 							}
5722 						}
5723 #endif
5724 						control->tail_mbuf = NULL;
5725 #ifdef INVARIANTS
5726 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5727 							panic("end_added, nothing left and no MSG_EOR");
5728 						}
5729 #endif
5730 					}
5731 				}
5732 			} else {
5733 				/* Do we need to trim the mbuf? */
5734 				if (control->spec_flags & M_NOTIFICATION) {
5735 					out_flags |= MSG_NOTIFICATION;
5736 				}
5737 				if ((in_flags & MSG_PEEK) == 0) {
5738 					SCTP_BUF_RESV_UF(m, cp_len);
5739 					SCTP_BUF_LEN(m) -= cp_len;
5740 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5741 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5742 					}
5743 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5744 					if ((control->do_not_ref_stcb == 0) &&
5745 					    stcb) {
5746 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5747 					}
5748 					copied_so_far += cp_len;
5749 					embuf = m;
5750 					freed_so_far += cp_len;
5751 					freed_so_far += MSIZE;
5752 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5753 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5754 						    SCTP_LOG_SBRESULT, 0);
5755 					}
5756 					atomic_subtract_int(&control->length, cp_len);
5757 				} else {
5758 					copied_so_far += cp_len;
5759 				}
5760 			}
5761 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5762 				break;
5763 			}
5764 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5765 			    (control->do_not_ref_stcb == 0) &&
5766 			    (freed_so_far >= rwnd_req)) {
5767 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5768 			}
5769 		}		/* end while(m) */
5770 		/*
5771 		 * At this point we have looked at it all and we either have
5772 		 * a MSG_EOR/or read all the user wants... <OR>
5773 		 * control->length == 0.
5774 		 */
5775 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5776 			/* we are done with this control */
5777 			if (control->length == 0) {
5778 				if (control->data) {
5779 #ifdef INVARIANTS
5780 					panic("control->data not null at read eor?");
5781 #else
5782 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5783 					sctp_m_freem(control->data);
5784 					control->data = NULL;
5785 #endif
5786 				}
5787 		done_with_control:
5788 				if (TAILQ_NEXT(control, next) == NULL) {
5789 					/*
5790 					 * If we don't have a next we need a
5791 					 * lock, if there is a next
5792 					 * interrupt is filling ahead of us
5793 					 * and we don't need a lock to
5794 					 * remove this guy (which is the
5795 					 * head of the queue).
5796 					 */
5797 					if (hold_rlock == 0) {
5798 						SCTP_INP_READ_LOCK(inp);
5799 						hold_rlock = 1;
5800 					}
5801 				}
5802 				TAILQ_REMOVE(&inp->read_queue, control, next);
5803 				/* Add back any hiddend data */
5804 				if (control->held_length) {
5805 					held_length = 0;
5806 					control->held_length = 0;
5807 					wakeup_read_socket = 1;
5808 				}
5809 				if (control->aux_data) {
5810 					sctp_m_free(control->aux_data);
5811 					control->aux_data = NULL;
5812 				}
5813 				no_rcv_needed = control->do_not_ref_stcb;
5814 				sctp_free_remote_addr(control->whoFrom);
5815 				control->data = NULL;
5816 				sctp_free_a_readq(stcb, control);
5817 				control = NULL;
5818 				if ((freed_so_far >= rwnd_req) &&
5819 				    (no_rcv_needed == 0))
5820 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5821 
5822 			} else {
5823 				/*
5824 				 * The user did not read all of this
5825 				 * message, turn off the returned MSG_EOR
5826 				 * since we are leaving more behind on the
5827 				 * control to read.
5828 				 */
5829 #ifdef INVARIANTS
5830 				if (control->end_added &&
5831 				    (control->data == NULL) &&
5832 				    (control->tail_mbuf == NULL)) {
5833 					panic("Gak, control->length is corrupt?");
5834 				}
5835 #endif
5836 				no_rcv_needed = control->do_not_ref_stcb;
5837 				out_flags &= ~MSG_EOR;
5838 			}
5839 		}
5840 		if (out_flags & MSG_EOR) {
5841 			goto release;
5842 		}
5843 		if ((uio->uio_resid == 0) ||
5844 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5845 		    ) {
5846 			goto release;
5847 		}
5848 		/*
5849 		 * If I hit here the receiver wants more and this message is
5850 		 * NOT done (pd-api). So two questions. Can we block? if not
5851 		 * we are done. Did the user NOT set MSG_WAITALL?
5852 		 */
5853 		if (block_allowed == 0) {
5854 			goto release;
5855 		}
5856 		/*
5857 		 * We need to wait for more data a few things: - We don't
5858 		 * sbunlock() so we don't get someone else reading. - We
5859 		 * must be sure to account for the case where what is added
5860 		 * is NOT to our control when we wakeup.
5861 		 */
5862 
5863 		/*
5864 		 * Do we need to tell the transport a rwnd update might be
5865 		 * needed before we go to sleep?
5866 		 */
5867 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5868 		    ((freed_so_far >= rwnd_req) &&
5869 		    (control->do_not_ref_stcb == 0) &&
5870 		    (no_rcv_needed == 0))) {
5871 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5872 		}
5873 wait_some_more:
5874 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5875 			goto release;
5876 		}
5877 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5878 			goto release;
5879 
5880 		if (hold_rlock == 1) {
5881 			SCTP_INP_READ_UNLOCK(inp);
5882 			hold_rlock = 0;
5883 		}
5884 		if (hold_sblock == 0) {
5885 			SOCKBUF_LOCK(&so->so_rcv);
5886 			hold_sblock = 1;
5887 		}
5888 		if ((copied_so_far) && (control->length == 0) &&
5889 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
5890 		    ) {
5891 			goto release;
5892 		}
5893 		if (so->so_rcv.sb_cc <= control->held_length) {
5894 			error = sbwait(&so->so_rcv);
5895 			if (error) {
5896 				goto release;
5897 			}
5898 			control->held_length = 0;
5899 		}
5900 		if (hold_sblock) {
5901 			SOCKBUF_UNLOCK(&so->so_rcv);
5902 			hold_sblock = 0;
5903 		}
5904 		if (control->length == 0) {
5905 			/* still nothing here */
5906 			if (control->end_added == 1) {
5907 				/* he aborted, or is done i.e.did a shutdown */
5908 				out_flags |= MSG_EOR;
5909 				if (control->pdapi_aborted) {
5910 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5911 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5912 
5913 					out_flags |= MSG_TRUNC;
5914 				} else {
5915 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5916 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5917 				}
5918 				goto done_with_control;
5919 			}
5920 			if (so->so_rcv.sb_cc > held_length) {
5921 				control->held_length = so->so_rcv.sb_cc;
5922 				held_length = 0;
5923 			}
5924 			goto wait_some_more;
5925 		} else if (control->data == NULL) {
5926 			/*
5927 			 * we must re-sync since data is probably being
5928 			 * added
5929 			 */
5930 			SCTP_INP_READ_LOCK(inp);
5931 			if ((control->length > 0) && (control->data == NULL)) {
5932 				/*
5933 				 * big trouble.. we have the lock and its
5934 				 * corrupt?
5935 				 */
5936 #ifdef INVARIANTS
5937 				panic("Impossible data==NULL length !=0");
5938 #endif
5939 				out_flags |= MSG_EOR;
5940 				out_flags |= MSG_TRUNC;
5941 				control->length = 0;
5942 				SCTP_INP_READ_UNLOCK(inp);
5943 				goto done_with_control;
5944 			}
5945 			SCTP_INP_READ_UNLOCK(inp);
5946 			/* We will fall around to get more data */
5947 		}
5948 		goto get_more_data;
5949 	} else {
5950 		/*-
5951 		 * Give caller back the mbuf chain,
5952 		 * store in uio_resid the length
5953 		 */
5954 		wakeup_read_socket = 0;
5955 		if ((control->end_added == 0) ||
5956 		    (TAILQ_NEXT(control, next) == NULL)) {
5957 			/* Need to get rlock */
5958 			if (hold_rlock == 0) {
5959 				SCTP_INP_READ_LOCK(inp);
5960 				hold_rlock = 1;
5961 			}
5962 		}
5963 		if (control->end_added) {
5964 			out_flags |= MSG_EOR;
5965 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5966 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5967 		}
5968 		if (control->spec_flags & M_NOTIFICATION) {
5969 			out_flags |= MSG_NOTIFICATION;
5970 		}
5971 		uio->uio_resid = control->length;
5972 		*mp = control->data;
5973 		m = control->data;
5974 		while (m) {
5975 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5976 				sctp_sblog(&so->so_rcv,
5977 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5978 			}
5979 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5980 			freed_so_far += SCTP_BUF_LEN(m);
5981 			freed_so_far += MSIZE;
5982 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5983 				sctp_sblog(&so->so_rcv,
5984 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5985 			}
5986 			m = SCTP_BUF_NEXT(m);
5987 		}
5988 		control->data = control->tail_mbuf = NULL;
5989 		control->length = 0;
5990 		if (out_flags & MSG_EOR) {
5991 			/* Done with this control */
5992 			goto done_with_control;
5993 		}
5994 	}
5995 release:
5996 	if (hold_rlock == 1) {
5997 		SCTP_INP_READ_UNLOCK(inp);
5998 		hold_rlock = 0;
5999 	}
6000 	if (hold_sblock == 1) {
6001 		SOCKBUF_UNLOCK(&so->so_rcv);
6002 		hold_sblock = 0;
6003 	}
6004 	sbunlock(&so->so_rcv);
6005 	sockbuf_lock = 0;
6006 
6007 release_unlocked:
6008 	if (hold_sblock) {
6009 		SOCKBUF_UNLOCK(&so->so_rcv);
6010 		hold_sblock = 0;
6011 	}
6012 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6013 		if ((freed_so_far >= rwnd_req) &&
6014 		    (control && (control->do_not_ref_stcb == 0)) &&
6015 		    (no_rcv_needed == 0))
6016 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6017 	}
6018 out:
6019 	if (msg_flags) {
6020 		*msg_flags = out_flags;
6021 	}
6022 	if (((out_flags & MSG_EOR) == 0) &&
6023 	    ((in_flags & MSG_PEEK) == 0) &&
6024 	    (sinfo) &&
6025 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
6026 		struct sctp_extrcvinfo *s_extra;
6027 
6028 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6029 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6030 	}
6031 	if (hold_rlock == 1) {
6032 		SCTP_INP_READ_UNLOCK(inp);
6033 		hold_rlock = 0;
6034 	}
6035 	if (hold_sblock) {
6036 		SOCKBUF_UNLOCK(&so->so_rcv);
6037 		hold_sblock = 0;
6038 	}
6039 	if (sockbuf_lock) {
6040 		sbunlock(&so->so_rcv);
6041 	}
6042 	if (freecnt_applied) {
6043 		/*
6044 		 * The lock on the socket buffer protects us so the free
6045 		 * code will stop. But since we used the socketbuf lock and
6046 		 * the sender uses the tcb_lock to increment, we need to use
6047 		 * the atomic add to the refcnt.
6048 		 */
6049 		if (stcb == NULL) {
6050 #ifdef INVARIANTS
6051 			panic("stcb for refcnt has gone NULL?");
6052 			goto stage_left;
6053 #else
6054 			goto stage_left;
6055 #endif
6056 		}
6057 		atomic_add_int(&stcb->asoc.refcnt, -1);
6058 		freecnt_applied = 0;
6059 		/* Save the value back for next time */
6060 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6061 	}
6062 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6063 		if (stcb) {
6064 			sctp_misc_ints(SCTP_SORECV_DONE,
6065 			    freed_so_far,
6066 			    ((uio) ? (slen - uio->uio_resid) : slen),
6067 			    stcb->asoc.my_rwnd,
6068 			    so->so_rcv.sb_cc);
6069 		} else {
6070 			sctp_misc_ints(SCTP_SORECV_DONE,
6071 			    freed_so_far,
6072 			    ((uio) ? (slen - uio->uio_resid) : slen),
6073 			    0,
6074 			    so->so_rcv.sb_cc);
6075 		}
6076 	}
6077 stage_left:
6078 	if (wakeup_read_socket) {
6079 		sctp_sorwakeup(inp, so);
6080 	}
6081 	return (error);
6082 }
6083 
6084 
6085 #ifdef SCTP_MBUF_LOGGING
6086 struct mbuf *
6087 sctp_m_free(struct mbuf *m)
6088 {
6089 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6090 		if (SCTP_BUF_IS_EXTENDED(m)) {
6091 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6092 		}
6093 	}
6094 	return (m_free(m));
6095 }
6096 
6097 void
6098 sctp_m_freem(struct mbuf *mb)
6099 {
6100 	while (mb != NULL)
6101 		mb = sctp_m_free(mb);
6102 }
6103 
6104 #endif
6105 
6106 int
6107 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6108 {
6109 	/*
6110 	 * Given a local address. For all associations that holds the
6111 	 * address, request a peer-set-primary.
6112 	 */
6113 	struct sctp_ifa *ifa;
6114 	struct sctp_laddr *wi;
6115 
6116 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6117 	if (ifa == NULL) {
6118 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6119 		return (EADDRNOTAVAIL);
6120 	}
6121 	/*
6122 	 * Now that we have the ifa we must awaken the iterator with this
6123 	 * message.
6124 	 */
6125 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6126 	if (wi == NULL) {
6127 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6128 		return (ENOMEM);
6129 	}
6130 	/* Now incr the count and int wi structure */
6131 	SCTP_INCR_LADDR_COUNT();
6132 	bzero(wi, sizeof(*wi));
6133 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6134 	wi->ifa = ifa;
6135 	wi->action = SCTP_SET_PRIM_ADDR;
6136 	atomic_add_int(&ifa->refcount, 1);
6137 
6138 	/* Now add it to the work queue */
6139 	SCTP_IPI_ITERATOR_WQ_LOCK();
6140 	/*
6141 	 * Should this really be a tailq? As it is we will process the
6142 	 * newest first :-0
6143 	 */
6144 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6145 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6146 	    (struct sctp_inpcb *)NULL,
6147 	    (struct sctp_tcb *)NULL,
6148 	    (struct sctp_nets *)NULL);
6149 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
6150 	return (0);
6151 }
6152 
6153 
6154 int
6155 sctp_soreceive(struct socket *so,
6156     struct sockaddr **psa,
6157     struct uio *uio,
6158     struct mbuf **mp0,
6159     struct mbuf **controlp,
6160     int *flagsp)
6161 {
6162 	int error, fromlen;
6163 	uint8_t sockbuf[256];
6164 	struct sockaddr *from;
6165 	struct sctp_extrcvinfo sinfo;
6166 	int filling_sinfo = 1;
6167 	struct sctp_inpcb *inp;
6168 
6169 	inp = (struct sctp_inpcb *)so->so_pcb;
6170 	/* pickup the assoc we are reading from */
6171 	if (inp == NULL) {
6172 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6173 		return (EINVAL);
6174 	}
6175 	if ((sctp_is_feature_off(inp,
6176 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6177 	    (controlp == NULL)) {
6178 		/* user does not want the sndrcv ctl */
6179 		filling_sinfo = 0;
6180 	}
6181 	if (psa) {
6182 		from = (struct sockaddr *)sockbuf;
6183 		fromlen = sizeof(sockbuf);
6184 		from->sa_len = 0;
6185 	} else {
6186 		from = NULL;
6187 		fromlen = 0;
6188 	}
6189 
6190 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6191 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6192 	if ((controlp) && (filling_sinfo)) {
6193 		/* copy back the sinfo in a CMSG format */
6194 		if (filling_sinfo)
6195 			*controlp = sctp_build_ctl_nchunk(inp,
6196 			    (struct sctp_sndrcvinfo *)&sinfo);
6197 		else
6198 			*controlp = NULL;
6199 	}
6200 	if (psa) {
6201 		/* copy back the address info */
6202 		if (from && from->sa_len) {
6203 			*psa = sodupsockaddr(from, M_NOWAIT);
6204 		} else {
6205 			*psa = NULL;
6206 		}
6207 	}
6208 	return (error);
6209 }
6210 
6211 
6212 int
6213 sctp_l_soreceive(struct socket *so,
6214     struct sockaddr **name,
6215     struct uio *uio,
6216     char **controlp,
6217     int *controllen,
6218     int *flag)
6219 {
6220 	int error, fromlen;
6221 	uint8_t sockbuf[256];
6222 	struct sockaddr *from;
6223 	struct sctp_extrcvinfo sinfo;
6224 	int filling_sinfo = 1;
6225 	struct sctp_inpcb *inp;
6226 
6227 	inp = (struct sctp_inpcb *)so->so_pcb;
6228 	/* pickup the assoc we are reading from */
6229 	if (inp == NULL) {
6230 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6231 		return (EINVAL);
6232 	}
6233 	if ((sctp_is_feature_off(inp,
6234 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6235 	    (controlp == NULL)) {
6236 		/* user does not want the sndrcv ctl */
6237 		filling_sinfo = 0;
6238 	}
6239 	if (name) {
6240 		from = (struct sockaddr *)sockbuf;
6241 		fromlen = sizeof(sockbuf);
6242 		from->sa_len = 0;
6243 	} else {
6244 		from = NULL;
6245 		fromlen = 0;
6246 	}
6247 
6248 	error = sctp_sorecvmsg(so, uio,
6249 	    (struct mbuf **)NULL,
6250 	    from, fromlen, flag,
6251 	    (struct sctp_sndrcvinfo *)&sinfo,
6252 	    filling_sinfo);
6253 	if ((controlp) && (filling_sinfo)) {
6254 		/*
6255 		 * copy back the sinfo in a CMSG format note that the caller
6256 		 * has reponsibility for freeing the memory.
6257 		 */
6258 		if (filling_sinfo)
6259 			*controlp = sctp_build_ctl_cchunk(inp,
6260 			    controllen,
6261 			    (struct sctp_sndrcvinfo *)&sinfo);
6262 	}
6263 	if (name) {
6264 		/* copy back the address info */
6265 		if (from && from->sa_len) {
6266 			*name = sodupsockaddr(from, M_WAIT);
6267 		} else {
6268 			*name = NULL;
6269 		}
6270 	}
6271 	return (error);
6272 }
6273 
6274 
6275 
6276 
6277 
6278 
6279 
6280 int
6281 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6282     int totaddr, int *error)
6283 {
6284 	int added = 0;
6285 	int i;
6286 	struct sctp_inpcb *inp;
6287 	struct sockaddr *sa;
6288 	size_t incr = 0;
6289 
6290 	sa = addr;
6291 	inp = stcb->sctp_ep;
6292 	*error = 0;
6293 	for (i = 0; i < totaddr; i++) {
6294 		if (sa->sa_family == AF_INET) {
6295 			incr = sizeof(struct sockaddr_in);
6296 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6297 				/* assoc gone no un-lock */
6298 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6299 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
6300 				*error = ENOBUFS;
6301 				goto out_now;
6302 			}
6303 			added++;
6304 		} else if (sa->sa_family == AF_INET6) {
6305 			incr = sizeof(struct sockaddr_in6);
6306 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6307 				/* assoc gone no un-lock */
6308 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6309 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
6310 				*error = ENOBUFS;
6311 				goto out_now;
6312 			}
6313 			added++;
6314 		}
6315 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6316 	}
6317 out_now:
6318 	return (added);
6319 }
6320 
6321 struct sctp_tcb *
6322 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6323     int *totaddr, int *num_v4, int *num_v6, int *error,
6324     int limit, int *bad_addr)
6325 {
6326 	struct sockaddr *sa;
6327 	struct sctp_tcb *stcb = NULL;
6328 	size_t incr, at, i;
6329 
6330 	at = incr = 0;
6331 	sa = addr;
6332 	*error = *num_v6 = *num_v4 = 0;
6333 	/* account and validate addresses */
6334 	for (i = 0; i < (size_t)*totaddr; i++) {
6335 		if (sa->sa_family == AF_INET) {
6336 			(*num_v4) += 1;
6337 			incr = sizeof(struct sockaddr_in);
6338 			if (sa->sa_len != incr) {
6339 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6340 				*error = EINVAL;
6341 				*bad_addr = 1;
6342 				return (NULL);
6343 			}
6344 		} else if (sa->sa_family == AF_INET6) {
6345 			struct sockaddr_in6 *sin6;
6346 
6347 			sin6 = (struct sockaddr_in6 *)sa;
6348 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6349 				/* Must be non-mapped for connectx */
6350 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6351 				*error = EINVAL;
6352 				*bad_addr = 1;
6353 				return (NULL);
6354 			}
6355 			(*num_v6) += 1;
6356 			incr = sizeof(struct sockaddr_in6);
6357 			if (sa->sa_len != incr) {
6358 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6359 				*error = EINVAL;
6360 				*bad_addr = 1;
6361 				return (NULL);
6362 			}
6363 		} else {
6364 			*totaddr = i;
6365 			/* we are done */
6366 			break;
6367 		}
6368 		SCTP_INP_INCR_REF(inp);
6369 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6370 		if (stcb != NULL) {
6371 			/* Already have or am bring up an association */
6372 			return (stcb);
6373 		} else {
6374 			SCTP_INP_DECR_REF(inp);
6375 		}
6376 		if ((at + incr) > (size_t)limit) {
6377 			*totaddr = i;
6378 			break;
6379 		}
6380 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6381 	}
6382 	return ((struct sctp_tcb *)NULL);
6383 }
6384 
6385 /*
6386  * sctp_bindx(ADD) for one address.
6387  * assumes all arguments are valid/checked by caller.
6388  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	/* scratch space for converting a v4-mapped v6 address to plain v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* reject a malformed sockaddr length up front */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* work with the embedded IPv4 address from here on */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* first address on an unbound endpoint: do a regular bind */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* is some other endpoint already bound to this address? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* clear the port so mgmt_ep_sa matches on address only */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* address is bound by a different endpoint */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6510 
6511 /*
6512  * sctp_bindx(DELETE) for one address.
6513  * assumes all arguments are valid/checked by caller.
6514  */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	/* scratch space for converting a v4-mapped v6 address to plain v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* reject a malformed sockaddr length up front */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* work with the embedded IPv4 address from here on */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6593 
6594 /*
6595  * returns the valid local address count for an assoc, taking into account
6596  * all scoping rules
6597  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			/* a v6 socket that is not v6-only may also use v4 */
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6729 
6730 #if defined(SCTP_LOCAL_TRACE_BUF)
6731 
6732 void
6733 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6734 {
6735 	uint32_t saveindex, newindex;
6736 
6737 	do {
6738 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6739 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6740 			newindex = 1;
6741 		} else {
6742 			newindex = saveindex + 1;
6743 		}
6744 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6745 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6746 		saveindex = 0;
6747 	}
6748 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6749 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6750 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6751 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6752 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6753 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6754 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6755 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6756 }
6757 
6758 #endif
/*
 * SCTP over UDP tunneling support: the routines below bind a kernel
 * UDP socket on the configured tunneling port and feed the packets it
 * receives back into the SCTP input path.
 */
6764 #include <netinet/udp.h>
6765 #include <netinet/udp_var.h>
6766 #include <sys/proc.h>
6767 #ifdef INET6
6768 #include <netinet6/sctp6_var.h>
6769 #endif
6770 
6771 static void
6772 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6773 {
6774 	struct ip *iph;
6775 	struct mbuf *sp, *last;
6776 	struct udphdr *uhdr;
6777 	uint16_t port = 0, len;
6778 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6779 
6780 	/*
6781 	 * Split out the mbuf chain. Leave the IP header in m, place the
6782 	 * rest in the sp.
6783 	 */
6784 	if ((m->m_flags & M_PKTHDR) == 0) {
6785 		/* Can't handle one that is not a pkt hdr */
6786 		goto out;
6787 	}
6788 	/* pull the src port */
6789 	iph = mtod(m, struct ip *);
6790 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6791 
6792 	port = uhdr->uh_sport;
6793 	sp = m_split(m, off, M_DONTWAIT);
6794 	if (sp == NULL) {
6795 		/* Gak, drop packet, we can't do a split */
6796 		goto out;
6797 	}
6798 	if (sp->m_pkthdr.len < header_size) {
6799 		/* Gak, packet can't have an SCTP header in it - to small */
6800 		m_freem(sp);
6801 		goto out;
6802 	}
6803 	/* ok now pull up the UDP header and SCTP header together */
6804 	sp = m_pullup(sp, header_size);
6805 	if (sp == NULL) {
6806 		/* Gak pullup failed */
6807 		goto out;
6808 	}
6809 	/* trim out the UDP header */
6810 	m_adj(sp, sizeof(struct udphdr));
6811 
6812 	/* Now reconstruct the mbuf chain */
6813 	/* 1) find last one */
6814 	last = m;
6815 	while (last->m_next != NULL) {
6816 		last = last->m_next;
6817 	}
6818 	last->m_next = sp;
6819 	m->m_pkthdr.len += sp->m_pkthdr.len;
6820 	last = m;
6821 	while (last != NULL) {
6822 		last = last->m_next;
6823 	}
6824 	/* Now its ready for sctp_input or sctp6_input */
6825 	iph = mtod(m, struct ip *);
6826 	switch (iph->ip_v) {
6827 	case IPVERSION:
6828 		{
6829 			/* its IPv4 */
6830 			len = SCTP_GET_IPV4_LENGTH(iph);
6831 			len -= sizeof(struct udphdr);
6832 			SCTP_GET_IPV4_LENGTH(iph) = len;
6833 			sctp_input_with_port(m, off, port);
6834 			break;
6835 		}
6836 #ifdef INET6
6837 	case IPV6_VERSION >> 4:
6838 		{
6839 			/* its IPv6 - NOT supported */
6840 			goto out;
6841 			break;
6842 
6843 		}
6844 #endif
6845 	default:
6846 		{
6847 			m_freem(m);
6848 			break;
6849 		}
6850 	}
6851 	return;
6852 out:
6853 	m_freem(m);
6854 }
6855 
6856 void
6857 sctp_over_udp_stop(void)
6858 {
6859 	struct socket *sop;
6860 
6861 	/*
6862 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6863 	 * for writting!
6864 	 */
6865 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6866 		/* Nothing to do */
6867 		return;
6868 	}
6869 	sop = SCTP_BASE_INFO(udp_tun_socket);
6870 	soclose(sop);
6871 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6872 }
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * Start SCTP-over-UDP tunneling: create a kernel UDP socket,
	 * register the tunneling input hook and bind the socket to the
	 * sysctl-configured port.  Returns 0 on success or an errno
	 * value.  This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up we cant get the port */
exit_stage_left:
		/* shared error path: tears down the socket created above */
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_upd_tunneled_packet().
	 */
	return (0);
}
6926