xref: /freebsd/sys/netinet/sctputil.c (revision 00a5db46de56179184c0f000eaacad695e2b0859)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_cc_functions.h>
51 
52 #define NUMBER_OF_MTU_SIZES 18
53 
54 
55 #if defined(__Windows__) && !defined(SCTP_LOCAL_TRACE_BUF)
56 #include "eventrace_netinet.h"
57 #include "sctputil.tmh"		/* this is the file that will be auto
58 				 * generated */
59 #else
60 #ifndef KTR_SCTP
61 #define KTR_SCTP KTR_SUBSYS
62 #endif
63 #endif
64 
65 void
66 sctp_sblog(struct sockbuf *sb,
67     struct sctp_tcb *stcb, int from, int incr)
68 {
69 	struct sctp_cwnd_log sctp_clog;
70 
71 	sctp_clog.x.sb.stcb = stcb;
72 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
73 	if (stcb)
74 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
75 	else
76 		sctp_clog.x.sb.stcb_sbcc = 0;
77 	sctp_clog.x.sb.incr = incr;
78 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
79 	    SCTP_LOG_EVENT_SB,
80 	    from,
81 	    sctp_clog.x.misc.log1,
82 	    sctp_clog.x.misc.log2,
83 	    sctp_clog.x.misc.log3,
84 	    sctp_clog.x.misc.log4);
85 }
86 
/*
 * Trace an endpoint/association close event (SCTP_LOG_EVENT_CLOSE).
 * "loc" identifies the call site inside the closing path.
 * NOTE(review): inp is dereferenced unconditionally, so callers must
 * never pass a NULL endpoint; only stcb may be NULL.
 */
void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
	} else {
		/* Endpoint-level close: no association to report. */
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	/* The x.misc overlay is how every event type is packed into the
	 * four KTR argument words. */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
110 
111 
112 void
113 rto_logging(struct sctp_nets *net, int from)
114 {
115 	struct sctp_cwnd_log sctp_clog;
116 
117 	memset(&sctp_clog, 0, sizeof(sctp_clog));
118 	sctp_clog.x.rto.net = (void *)net;
119 	sctp_clog.x.rto.rtt = net->prev_rtt;
120 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
121 	    SCTP_LOG_EVENT_RTT,
122 	    from,
123 	    sctp_clog.x.misc.log1,
124 	    sctp_clog.x.misc.log2,
125 	    sctp_clog.x.misc.log3,
126 	    sctp_clog.x.misc.log4);
127 
128 }
129 
130 void
131 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
132 {
133 	struct sctp_cwnd_log sctp_clog;
134 
135 	sctp_clog.x.strlog.stcb = stcb;
136 	sctp_clog.x.strlog.n_tsn = tsn;
137 	sctp_clog.x.strlog.n_sseq = sseq;
138 	sctp_clog.x.strlog.e_tsn = 0;
139 	sctp_clog.x.strlog.e_sseq = 0;
140 	sctp_clog.x.strlog.strm = stream;
141 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
142 	    SCTP_LOG_EVENT_STRM,
143 	    from,
144 	    sctp_clog.x.misc.log1,
145 	    sctp_clog.x.misc.log2,
146 	    sctp_clog.x.misc.log3,
147 	    sctp_clog.x.misc.log4);
148 
149 }
150 
/*
 * Trace a Nagle-algorithm decision (SCTP_LOG_EVENT_NAGLE): queue and
 * flight counters at the moment the send/hold decision ("action") was
 * taken.  NOTE(review): stcb is dereferenced unconditionally — callers
 * must pass a valid association.
 */
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
169 
170 
171 void
172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
173 {
174 	struct sctp_cwnd_log sctp_clog;
175 
176 	sctp_clog.x.sack.cumack = cumack;
177 	sctp_clog.x.sack.oldcumack = old_cumack;
178 	sctp_clog.x.sack.tsn = tsn;
179 	sctp_clog.x.sack.numGaps = gaps;
180 	sctp_clog.x.sack.numDups = dups;
181 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
182 	    SCTP_LOG_EVENT_SACK,
183 	    from,
184 	    sctp_clog.x.misc.log1,
185 	    sctp_clog.x.misc.log2,
186 	    sctp_clog.x.misc.log3,
187 	    sctp_clog.x.misc.log4);
188 }
189 
190 void
191 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
192 {
193 	struct sctp_cwnd_log sctp_clog;
194 
195 	memset(&sctp_clog, 0, sizeof(sctp_clog));
196 	sctp_clog.x.map.base = map;
197 	sctp_clog.x.map.cum = cum;
198 	sctp_clog.x.map.high = high;
199 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
200 	    SCTP_LOG_EVENT_MAP,
201 	    from,
202 	    sctp_clog.x.misc.log1,
203 	    sctp_clog.x.misc.log2,
204 	    sctp_clog.x.misc.log3,
205 	    sctp_clog.x.misc.log4);
206 }
207 
208 void
209 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
210     int from)
211 {
212 	struct sctp_cwnd_log sctp_clog;
213 
214 	memset(&sctp_clog, 0, sizeof(sctp_clog));
215 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
216 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
217 	sctp_clog.x.fr.tsn = tsn;
218 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
219 	    SCTP_LOG_EVENT_FR,
220 	    from,
221 	    sctp_clog.x.misc.log1,
222 	    sctp_clog.x.misc.log2,
223 	    sctp_clog.x.misc.log3,
224 	    sctp_clog.x.misc.log4);
225 
226 }
227 
228 
/*
 * Trace the layout of one mbuf (SCTP_LOG_EVENT_MBUF): flags, length,
 * data pointer, and — when external storage is attached — the external
 * buffer base and its reference count.
 */
void
sctp_log_mb(struct mbuf *m, int from)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		/* Plain mbuf: no cluster/external storage to report. */
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
253 
254 
/*
 * Trace a stream-delivery event (SCTP_LOG_EVENT_STRM) comparing the
 * entry being delivered ("control") against an optional neighboring
 * queue entry ("poschk").  A NULL control is logged to the console and
 * ignored rather than dereferenced.
 */
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
    int from)
{
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
	} else {
		/* No comparison entry supplied. */
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
285 
/*
 * Trace a congestion-window event (SCTP_LOG_EVENT_CWND) for an
 * association and, optionally, one of its destination networks.
 * "augment" is the cwnd delta being applied by the caller.
 * NOTE(review): stcb is dereferenced unconditionally; only net may be
 * NULL.
 */
void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	/* Queue counts are clamped to 255 to fit the 8-bit log fields. */
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	/* Pre-send events reuse the meets_pseudo_cumack slot to carry the
	 * peer's receive window instead. */
	if (SCTP_CWNDLOG_PRESEND == from) {
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
321 
322 void
323 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
324 {
325 	struct sctp_cwnd_log sctp_clog;
326 
327 	memset(&sctp_clog, 0, sizeof(sctp_clog));
328 	if (inp) {
329 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
330 
331 	} else {
332 		sctp_clog.x.lock.sock = (void *)NULL;
333 	}
334 	sctp_clog.x.lock.inp = (void *)inp;
335 	if (stcb) {
336 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
337 	} else {
338 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
339 	}
340 	if (inp) {
341 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
342 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
343 	} else {
344 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
345 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
346 	}
347 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
348 	if (inp->sctp_socket) {
349 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
350 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
351 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
352 	} else {
353 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
354 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
355 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
356 	}
357 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
358 	    SCTP_LOG_LOCK_EVENT,
359 	    from,
360 	    sctp_clog.x.misc.log1,
361 	    sctp_clog.x.misc.log2,
362 	    sctp_clog.x.misc.log3,
363 	    sctp_clog.x.misc.log4);
364 
365 }
366 
/*
 * Trace a max-burst limiting event (SCTP_LOG_EVENT_MAXBURST): the
 * error/reason code, current flight size and the burst limit applied.
 * NOTE(review): unlike sctp_log_cwnd(), both stcb and net are
 * dereferenced without NULL checks here — confirm all callers pass
 * valid pointers.
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	/* Queue counts are clamped to 255 to fit the 8-bit log fields. */
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
394 
395 void
396 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
397 {
398 	struct sctp_cwnd_log sctp_clog;
399 
400 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
401 	sctp_clog.x.rwnd.send_size = snd_size;
402 	sctp_clog.x.rwnd.overhead = overhead;
403 	sctp_clog.x.rwnd.new_rwnd = 0;
404 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
405 	    SCTP_LOG_EVENT_RWND,
406 	    from,
407 	    sctp_clog.x.misc.log1,
408 	    sctp_clog.x.misc.log2,
409 	    sctp_clog.x.misc.log3,
410 	    sctp_clog.x.misc.log4);
411 }
412 
413 void
414 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
415 {
416 	struct sctp_cwnd_log sctp_clog;
417 
418 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
419 	sctp_clog.x.rwnd.send_size = flight_size;
420 	sctp_clog.x.rwnd.overhead = overhead;
421 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
422 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
423 	    SCTP_LOG_EVENT_RWND,
424 	    from,
425 	    sctp_clog.x.misc.log1,
426 	    sctp_clog.x.misc.log2,
427 	    sctp_clog.x.misc.log3,
428 	    sctp_clog.x.misc.log4);
429 }
430 
431 void
432 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
433 {
434 	struct sctp_cwnd_log sctp_clog;
435 
436 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
437 	sctp_clog.x.mbcnt.size_change = book;
438 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
439 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
440 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
441 	    SCTP_LOG_EVENT_MBCNT,
442 	    from,
443 	    sctp_clog.x.misc.log1,
444 	    sctp_clog.x.misc.log2,
445 	    sctp_clog.x.misc.log3,
446 	    sctp_clog.x.misc.log4);
447 
448 }
449 
/*
 * Trace four arbitrary 32-bit values as a generic misc event
 * (SCTP_LOG_MISC_EVENT); "from" identifies the call site.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
458 
/*
 * Trace a socket-wakeup event (SCTP_LOG_EVENT_WAKE): queue depths,
 * deferred-wakeup PCB flags and the send-buffer flags at the moment
 * the wakeup decision was made.
 * NOTE(review): stcb and stcb->sctp_ep are dereferenced
 * unconditionally — callers must pass a valid association.
 */
void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	/* Counts below are clamped to 0xff to fit 8-bit log fields. */
	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* Encode the deferred-wakeup PCB flags as a bitmask. */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* Send-buffer flags, or 0xff when no socket is attached. */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
505 
/*
 * Trace a sender-blocking event (SCTP_LOG_EVENT_BLOCK): why a send is
 * blocking, capturing queue sizes, the peer's rwnd, flight size (in
 * KiB) and the length of the pending send.  The "so" parameter is
 * currently unused.
 */
void
sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
	/* Flight size is scaled down to KiB to fit a 16-bit field. */
	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
527 
/*
 * Stub for the old statistics-log retrieval sockopt.  The in-kernel
 * log is read via KTR instead; this always reports success without
 * touching optval/optsize.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
534 
535 #ifdef SCTP_AUDITING_ENABLED
536 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
537 static int sctp_audit_indx = 0;
538 
539 static
540 void
541 sctp_print_audit_report(void)
542 {
543 	int i;
544 	int cnt;
545 
546 	cnt = 0;
547 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
548 		if ((sctp_audit_data[i][0] == 0xe0) &&
549 		    (sctp_audit_data[i][1] == 0x01)) {
550 			cnt = 0;
551 			SCTP_PRINTF("\n");
552 		} else if (sctp_audit_data[i][0] == 0xf0) {
553 			cnt = 0;
554 			SCTP_PRINTF("\n");
555 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
556 		    (sctp_audit_data[i][1] == 0x01)) {
557 			SCTP_PRINTF("\n");
558 			cnt = 0;
559 		}
560 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
561 		    (uint32_t) sctp_audit_data[i][1]);
562 		cnt++;
563 		if ((cnt % 14) == 0)
564 			SCTP_PRINTF("\n");
565 	}
566 	for (i = 0; i < sctp_audit_indx; i++) {
567 		if ((sctp_audit_data[i][0] == 0xe0) &&
568 		    (sctp_audit_data[i][1] == 0x01)) {
569 			cnt = 0;
570 			SCTP_PRINTF("\n");
571 		} else if (sctp_audit_data[i][0] == 0xf0) {
572 			cnt = 0;
573 			SCTP_PRINTF("\n");
574 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
575 		    (sctp_audit_data[i][1] == 0x01)) {
576 			SCTP_PRINTF("\n");
577 			cnt = 0;
578 		}
579 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
580 		    (uint32_t) sctp_audit_data[i][1]);
581 		cnt++;
582 		if ((cnt % 14) == 0)
583 			SCTP_PRINTF("\n");
584 	}
585 	SCTP_PRINTF("\n");
586 }
587 
588 void
589 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
590     struct sctp_nets *net)
591 {
592 	int resend_cnt, tot_out, rep, tot_book_cnt;
593 	struct sctp_nets *lnet;
594 	struct sctp_tmit_chunk *chk;
595 
596 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
597 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
598 	sctp_audit_indx++;
599 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
600 		sctp_audit_indx = 0;
601 	}
602 	if (inp == NULL) {
603 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
604 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
605 		sctp_audit_indx++;
606 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
607 			sctp_audit_indx = 0;
608 		}
609 		return;
610 	}
611 	if (stcb == NULL) {
612 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
613 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
614 		sctp_audit_indx++;
615 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
616 			sctp_audit_indx = 0;
617 		}
618 		return;
619 	}
620 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
621 	sctp_audit_data[sctp_audit_indx][1] =
622 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
623 	sctp_audit_indx++;
624 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
625 		sctp_audit_indx = 0;
626 	}
627 	rep = 0;
628 	tot_book_cnt = 0;
629 	resend_cnt = tot_out = 0;
630 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
631 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
632 			resend_cnt++;
633 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
634 			tot_out += chk->book_size;
635 			tot_book_cnt++;
636 		}
637 	}
638 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
639 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
640 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
641 		sctp_audit_indx++;
642 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
643 			sctp_audit_indx = 0;
644 		}
645 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
646 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
647 		rep = 1;
648 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
649 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
650 		sctp_audit_data[sctp_audit_indx][1] =
651 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
652 		sctp_audit_indx++;
653 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
654 			sctp_audit_indx = 0;
655 		}
656 	}
657 	if (tot_out != stcb->asoc.total_flight) {
658 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
659 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
660 		sctp_audit_indx++;
661 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
662 			sctp_audit_indx = 0;
663 		}
664 		rep = 1;
665 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
666 		    (int)stcb->asoc.total_flight);
667 		stcb->asoc.total_flight = tot_out;
668 	}
669 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
670 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
671 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
672 		sctp_audit_indx++;
673 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
674 			sctp_audit_indx = 0;
675 		}
676 		rep = 1;
677 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
678 
679 		stcb->asoc.total_flight_count = tot_book_cnt;
680 	}
681 	tot_out = 0;
682 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
683 		tot_out += lnet->flight_size;
684 	}
685 	if (tot_out != stcb->asoc.total_flight) {
686 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
687 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
688 		sctp_audit_indx++;
689 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
690 			sctp_audit_indx = 0;
691 		}
692 		rep = 1;
693 		SCTP_PRINTF("real flight:%d net total was %d\n",
694 		    stcb->asoc.total_flight, tot_out);
695 		/* now corrective action */
696 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
697 
698 			tot_out = 0;
699 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
700 				if ((chk->whoTo == lnet) &&
701 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
702 					tot_out += chk->book_size;
703 				}
704 			}
705 			if (lnet->flight_size != tot_out) {
706 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
707 				    (uint32_t) lnet, lnet->flight_size,
708 				    tot_out);
709 				lnet->flight_size = tot_out;
710 			}
711 		}
712 	}
713 	if (rep) {
714 		sctp_print_audit_report();
715 	}
716 }
717 
718 void
719 sctp_audit_log(uint8_t ev, uint8_t fd)
720 {
721 
722 	sctp_audit_data[sctp_audit_indx][0] = ev;
723 	sctp_audit_data[sctp_audit_indx][1] = fd;
724 	sctp_audit_indx++;
725 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
726 		sctp_audit_indx = 0;
727 	}
728 }
729 
730 #endif
731 
732 /*
733  * a list of sizes based on typical mtu's, used only if next hop size not
734  * returned.
735  */
/*
 * NOTE: must contain exactly NUMBER_OF_MTU_SIZES (18) entries in
 * strictly ascending order — find_next_best_mtu() relies on both.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
756 
/*
 * Stop all association-level timers and every destination's
 * fast-retransmit and path-MTU timers in preparation for shutting the
 * association down.  Retransmission timers are intentionally not
 * stopped here.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	/* Per-destination timers. */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
	}
}
776 
777 int
778 find_next_best_mtu(int totsz)
779 {
780 	int i, perfer;
781 
782 	/*
783 	 * if we are in here we must find the next best fit based on the
784 	 * size of the dg that failed to be sent.
785 	 */
786 	perfer = 0;
787 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
788 		if (totsz < sctp_mtu_sizes[i]) {
789 			perfer = i - 1;
790 			if (perfer < 0)
791 				perfer = 0;
792 			break;
793 		}
794 	}
795 	return (sctp_mtu_sizes[perfer]);
796 }
797 
/*
 * Refill the endpoint's random-number store by HMACing the stored
 * random seed with a monotonically increasing counter, then reset the
 * read offset to the beginning of the store.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	/* Key = random_numbers seed, message = counter, out = store. */
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
816 
/*
 * Draw a pseudo-random 32-bit value from the endpoint's random store,
 * used for initial TSNs and verification tags.  Consumes 4 bytes per
 * call via a lock-free compare-and-set on the store offset, refilling
 * the store when the offset wraps to zero.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	/* Debug mode: hand out a predictable increasing sequence. */
	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before the last 3 bytes so a full uint32_t always fits. */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* Claim the 4-byte slot atomically; retry on a racing claim. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
854 
855 uint32_t
856 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
857 {
858 	u_long x, not_done;
859 	struct timeval now;
860 
861 	(void)SCTP_GETTIME_TIMEVAL(&now);
862 	not_done = 1;
863 	while (not_done) {
864 		x = sctp_select_initial_TSN(&inp->sctp_ep);
865 		if (x == 0) {
866 			/* we never use 0 */
867 			continue;
868 		}
869 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
870 			not_done = 0;
871 		}
872 	}
873 	return (x);
874 }
875 
876 int
877 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
878     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
879 {
880 	struct sctp_association *asoc;
881 
882 	/*
883 	 * Anything set to zero is taken care of by the allocation routine's
884 	 * bzero
885 	 */
886 
887 	/*
888 	 * Up front select what scoping to apply on addresses I tell my peer
889 	 * Not sure what to do with these right now, we will need to come up
890 	 * with a way to set them. We may need to pass them through from the
891 	 * caller in the sctp_aloc_assoc() function.
892 	 */
893 	int i;
894 
895 	asoc = &stcb->asoc;
896 	/* init all variables to a known value. */
897 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
898 	asoc->max_burst = m->sctp_ep.max_burst;
899 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
900 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
901 	asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
902 	/* EY Init nr_sack variable */
903 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
904 	/* JRS 5/21/07 - Init CMT PF variables */
905 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
906 	asoc->sctp_frag_point = m->sctp_frag_point;
907 #ifdef INET
908 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
909 #else
910 	asoc->default_tos = 0;
911 #endif
912 
913 #ifdef INET6
914 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
915 #else
916 	asoc->default_flowlabel = 0;
917 #endif
918 	asoc->sb_send_resv = 0;
919 	if (override_tag) {
920 #ifdef MICHAELS_EXPERIMENT
921 		if (sctp_is_in_timewait(override_tag, stcb->sctp_ep->sctp_lport, stcb->rport)) {
922 			/*
923 			 * It must be in the time-wait hash, we put it there
924 			 * when we aloc one. If not the peer is playing
925 			 * games.
926 			 */
927 			asoc->my_vtag = override_tag;
928 		} else {
929 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
930 #ifdef INVARIANTS
931 			panic("Huh is_in_timewait fails");
932 #endif
933 			return (ENOMEM);
934 		}
935 #else
936 		asoc->my_vtag = override_tag;
937 #endif
938 	} else {
939 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
940 	}
941 	/* Get the nonce tags */
942 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
943 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
944 	asoc->vrf_id = vrf_id;
945 
946 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
947 		asoc->hb_is_disabled = 1;
948 	else
949 		asoc->hb_is_disabled = 0;
950 
951 #ifdef SCTP_ASOCLOG_OF_TSNS
952 	asoc->tsn_in_at = 0;
953 	asoc->tsn_out_at = 0;
954 	asoc->tsn_in_wrapped = 0;
955 	asoc->tsn_out_wrapped = 0;
956 	asoc->cumack_log_at = 0;
957 	asoc->cumack_log_atsnt = 0;
958 #endif
959 #ifdef SCTP_FS_SPEC_LOG
960 	asoc->fs_index = 0;
961 #endif
962 	asoc->refcnt = 0;
963 	asoc->assoc_up_sent = 0;
964 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
965 	    sctp_select_initial_TSN(&m->sctp_ep);
966 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
967 	/* we are optimisitic here */
968 	asoc->peer_supports_pktdrop = 1;
969 	asoc->peer_supports_nat = 0;
970 	asoc->sent_queue_retran_cnt = 0;
971 
972 	/* for CMT */
973 	asoc->last_net_cmt_send_started = NULL;
974 
975 	/* This will need to be adjusted */
976 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
977 	asoc->last_acked_seq = asoc->init_seq_number - 1;
978 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
979 	asoc->asconf_seq_in = asoc->last_acked_seq;
980 
981 	/* here we are different, we hold the next one we expect */
982 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
983 
984 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
985 	asoc->initial_rto = m->sctp_ep.initial_rto;
986 
987 	asoc->max_init_times = m->sctp_ep.max_init_times;
988 	asoc->max_send_times = m->sctp_ep.max_send_times;
989 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
990 	asoc->free_chunk_cnt = 0;
991 
992 	asoc->iam_blocking = 0;
993 	/* ECN Nonce initialization */
994 	asoc->context = m->sctp_context;
995 	asoc->def_send = m->def_send;
996 	asoc->ecn_nonce_allowed = 0;
997 	asoc->receiver_nonce_sum = 1;
998 	asoc->nonce_sum_expect_base = 1;
999 	asoc->nonce_sum_check = 1;
1000 	asoc->nonce_resync_tsn = 0;
1001 	asoc->nonce_wait_for_ecne = 0;
1002 	asoc->nonce_wait_tsn = 0;
1003 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1004 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
1005 	asoc->pr_sctp_cnt = 0;
1006 	asoc->total_output_queue_size = 0;
1007 
1008 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1009 		struct in6pcb *inp6;
1010 
1011 		/* Its a V6 socket */
1012 		inp6 = (struct in6pcb *)m;
1013 		asoc->ipv6_addr_legal = 1;
1014 		/* Now look at the binding flag to see if V4 will be legal */
1015 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1016 			asoc->ipv4_addr_legal = 1;
1017 		} else {
1018 			/* V4 addresses are NOT legal on the association */
1019 			asoc->ipv4_addr_legal = 0;
1020 		}
1021 	} else {
1022 		/* Its a V4 socket, no - V6 */
1023 		asoc->ipv4_addr_legal = 1;
1024 		asoc->ipv6_addr_legal = 0;
1025 	}
1026 
1027 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1028 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1029 
1030 	asoc->smallest_mtu = m->sctp_frag_point;
1031 #ifdef SCTP_PRINT_FOR_B_AND_M
1032 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1033 	    asoc->smallest_mtu);
1034 #endif
1035 	asoc->minrto = m->sctp_ep.sctp_minrto;
1036 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1037 
1038 	asoc->locked_on_sending = NULL;
1039 	asoc->stream_locked_on = 0;
1040 	asoc->ecn_echo_cnt_onq = 0;
1041 	asoc->stream_locked = 0;
1042 
1043 	asoc->send_sack = 1;
1044 
1045 	LIST_INIT(&asoc->sctp_restricted_addrs);
1046 
1047 	TAILQ_INIT(&asoc->nets);
1048 	TAILQ_INIT(&asoc->pending_reply_queue);
1049 	TAILQ_INIT(&asoc->asconf_ack_sent);
1050 	/* Setup to fill the hb random cache at first HB */
1051 	asoc->hb_random_idx = 4;
1052 
1053 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1054 
1055 	/*
1056 	 * JRS - Pick the default congestion control module based on the
1057 	 * sysctl.
1058 	 */
1059 	switch (m->sctp_ep.sctp_default_cc_module) {
1060 		/* JRS - Standard TCP congestion control */
1061 	case SCTP_CC_RFC2581:
1062 		{
1063 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1064 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1065 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1066 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1067 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1068 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1069 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1070 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1071 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1072 			break;
1073 		}
1074 		/* JRS - High Speed TCP congestion control (Floyd) */
1075 	case SCTP_CC_HSTCP:
1076 		{
1077 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1078 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1079 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1080 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1081 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1082 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1083 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1084 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1085 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1086 			break;
1087 		}
1088 		/* JRS - HTCP congestion control */
1089 	case SCTP_CC_HTCP:
1090 		{
1091 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1092 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1093 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1094 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1095 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1096 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1097 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1098 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1099 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1100 			break;
1101 		}
1102 		/* JRS - By default, use RFC2581 */
1103 	default:
1104 		{
1105 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1106 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1107 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1108 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1109 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1110 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1111 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1112 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1113 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1114 			break;
1115 		}
1116 	}
1117 
1118 	/*
1119 	 * Now the stream parameters, here we allocate space for all streams
1120 	 * that we request by default.
1121 	 */
1122 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1123 	    m->sctp_ep.pre_open_stream_count;
1124 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1125 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1126 	    SCTP_M_STRMO);
1127 	if (asoc->strmout == NULL) {
1128 		/* big trouble no memory */
1129 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1130 		return (ENOMEM);
1131 	}
1132 	for (i = 0; i < asoc->streamoutcnt; i++) {
1133 		/*
1134 		 * inbound side must be set to 0xffff, also NOTE when we get
1135 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1136 		 * count (streamoutcnt) but first check if we sent to any of
1137 		 * the upper streams that were dropped (if some were). Those
1138 		 * that were dropped must be notified to the upper layer as
1139 		 * failed to send.
1140 		 */
1141 		asoc->strmout[i].next_sequence_sent = 0x0;
1142 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1143 		asoc->strmout[i].stream_no = i;
1144 		asoc->strmout[i].last_msg_incomplete = 0;
1145 		asoc->strmout[i].next_spoke.tqe_next = 0;
1146 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1147 	}
1148 	/* Now the mapping array */
1149 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1150 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1151 	    SCTP_M_MAP);
1152 	if (asoc->mapping_array == NULL) {
1153 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1154 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1155 		return (ENOMEM);
1156 	}
1157 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1158 	/* EY  - initialize the nr_mapping_array just like mapping array */
1159 	asoc->nr_mapping_array_size = SCTP_INITIAL_NR_MAPPING_ARRAY;
1160 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->nr_mapping_array_size,
1161 	    SCTP_M_MAP);
1162 	/*
1163 	 * if (asoc->nr_mapping_array == NULL) { SCTP_FREE(asoc->strmout,
1164 	 * SCTP_M_STRMO); SCTP_LTRACE_ERR_RET(NULL, stcb, NULL,
1165 	 * SCTP_FROM_SCTPUTIL, ENOMEM); return (ENOMEM); }
1166 	 */
1167 	memset(asoc->nr_mapping_array, 0, asoc->nr_mapping_array_size);
1168 
1169 	/* Now the init of the other outqueues */
1170 	TAILQ_INIT(&asoc->free_chunks);
1171 	TAILQ_INIT(&asoc->out_wheel);
1172 	TAILQ_INIT(&asoc->control_send_queue);
1173 	TAILQ_INIT(&asoc->asconf_send_queue);
1174 	TAILQ_INIT(&asoc->send_queue);
1175 	TAILQ_INIT(&asoc->sent_queue);
1176 	TAILQ_INIT(&asoc->reasmqueue);
1177 	TAILQ_INIT(&asoc->resetHead);
1178 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1179 	TAILQ_INIT(&asoc->asconf_queue);
1180 	/* authentication fields */
1181 	asoc->authinfo.random = NULL;
1182 	asoc->authinfo.active_keyid = 0;
1183 	asoc->authinfo.assoc_key = NULL;
1184 	asoc->authinfo.assoc_keyid = 0;
1185 	asoc->authinfo.recv_key = NULL;
1186 	asoc->authinfo.recv_keyid = 0;
1187 	LIST_INIT(&asoc->shared_keys);
1188 	asoc->marked_retrans = 0;
1189 	asoc->timoinit = 0;
1190 	asoc->timodata = 0;
1191 	asoc->timosack = 0;
1192 	asoc->timoshutdown = 0;
1193 	asoc->timoheartbeat = 0;
1194 	asoc->timocookie = 0;
1195 	asoc->timoshutdownack = 0;
1196 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1197 	asoc->discontinuity_time = asoc->start_time;
1198 	/*
1199 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1200 	 * freed later whe the association is freed.
1201 	 */
1202 	return (0);
1203 }
1204 
1205 int
1206 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1207 {
1208 	/* mapping array needs to grow */
1209 	uint8_t *new_array;
1210 	uint32_t new_size;
1211 
1212 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1213 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1214 	if (new_array == NULL) {
1215 		/* can't get more, forget it */
1216 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1217 		    new_size);
1218 		return (-1);
1219 	}
1220 	memset(new_array, 0, new_size);
1221 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1222 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1223 	asoc->mapping_array = new_array;
1224 	asoc->mapping_array_size = new_size;
1225 	if (asoc->peer_supports_nr_sack) {
1226 		new_size = asoc->nr_mapping_array_size + ((needed + 7) / 8 + SCTP_NR_MAPPING_ARRAY_INCR);
1227 		SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1228 		if (new_array == NULL) {
1229 			/* can't get more, forget it */
1230 			SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1231 			    new_size);
1232 			return (-1);
1233 		}
1234 		memset(new_array, 0, new_size);
1235 		memcpy(new_array, asoc->nr_mapping_array, asoc->nr_mapping_array_size);
1236 		SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1237 		asoc->nr_mapping_array = new_array;
1238 		asoc->nr_mapping_array_size = new_size;
1239 	}
1240 	return (0);
1241 }
1242 
1243 
1244 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Service one queued iterator request: walk the global endpoint list
 * (or a single endpoint, with SCTP_ITERATOR_DO_SINGLE_INP), and for
 * each endpoint whose pcb flags/features match, run the optional
 * per-endpoint callback (function_inp), the per-association callback
 * (function_assoc) on each TCB in the requested state, and the
 * per-endpoint end callback (function_inp_end).  When the walk is
 * done, function_atend is called and the iterator itself is freed.
 *
 * Locking: runs under the global ITERATOR lock, taking INP and TCB
 * locks as it goes; the lock is briefly released every
 * SCTP_ITERATOR_MAX_AT_ONCE associations to let other work proceed.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	/* drop the reference the caller took to pin the starting inp */
	if (it->inp) {
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* the iterator owns itself; free it on completion */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/* skip endpoints that don't match the requested flags/features */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/*
	 * NOTE(review): the write lock is fully released before the read
	 * lock is taken, leaving a window with no INP lock held —
	 * presumably safe because the ITERATOR lock pins the list; TODO
	 * confirm.
	 */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/*
			 * Pause to let others grab the lock; the refcounts
			 * keep the stcb and inp alive while every lock is
			 * dropped and reacquired.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	/*
	 * NOTE(review): this lock/unlock "pulse" appears to act only as a
	 * barrier against a concurrent writer — TODO confirm intent.
	 */
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1371 
1372 void
1373 sctp_iterator_worker(void)
1374 {
1375 	struct sctp_iterator *it = NULL;
1376 
1377 	/* This function is called with the WQ lock in place */
1378 
1379 	SCTP_BASE_INFO(iterator_running) = 1;
1380 again:
1381 	it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1382 	while (it) {
1383 		/* now lets work on this one */
1384 		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
1385 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1386 		sctp_iterator_work(it);
1387 		SCTP_IPI_ITERATOR_WQ_LOCK();
1388 		/* sa_ignore FREED_MEMORY */
1389 		it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1390 	}
1391 	if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
1392 		goto again;
1393 	}
1394 	SCTP_BASE_INFO(iterator_running) = 0;
1395 	return;
1396 }
1397 
1398 #endif
1399 
1400 
1401 static void
1402 sctp_handle_addr_wq(void)
1403 {
1404 	/* deal with the ADDR wq from the rtsock calls */
1405 	struct sctp_laddr *wi;
1406 	struct sctp_asconf_iterator *asc;
1407 
1408 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1409 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1410 	if (asc == NULL) {
1411 		/* Try later, no memory */
1412 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1413 		    (struct sctp_inpcb *)NULL,
1414 		    (struct sctp_tcb *)NULL,
1415 		    (struct sctp_nets *)NULL);
1416 		return;
1417 	}
1418 	LIST_INIT(&asc->list_of_work);
1419 	asc->cnt = 0;
1420 	SCTP_IPI_ITERATOR_WQ_LOCK();
1421 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1422 	while (wi != NULL) {
1423 		LIST_REMOVE(wi, sctp_nxt_addr);
1424 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1425 		asc->cnt++;
1426 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1427 	}
1428 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1429 	if (asc->cnt == 0) {
1430 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1431 	} else {
1432 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1433 		    sctp_asconf_iterator_stcb,
1434 		    NULL,	/* No ep end for boundall */
1435 		    SCTP_PCB_FLAGS_BOUNDALL,
1436 		    SCTP_PCB_ANY_FEATURES,
1437 		    SCTP_ASOC_ANY_STATE,
1438 		    (void *)asc, 0,
1439 		    sctp_asconf_iterator_end, NULL, 0);
1440 	}
1441 }
1442 
/*
 * Scratch values used by the T3-rxt (SEND) case in
 * sctp_timeout_handler() below.
 *
 * NOTE(review): these are writable file-scope globals with external
 * linkage, shared by every concurrently firing timer — presumably
 * they exist only for debugger visibility; they look like they should
 * be locals (or at least static).  TODO confirm no other file
 * references them before changing linkage.
 */
int retcode = 0;
int cur_oerr = 0;
1445 
/*
 * Common callout handler for every SCTP timer type.  "t" points at
 * the struct sctp_timer embedded in the owning object; its ep/tcb/net
 * fields identify the endpoint, association and destination address
 * the timer belongs to (any may be NULL, depending on the type).
 *
 * The function first validates the timer (self pointer, type, still
 * active), pins the inp via a reference and the stcb via refcnt +
 * TCB lock, then dispatches on tmr->type; most cases call a
 * type-specific *_timer() helper and, if that reports the tcb was
 * not freed, push any resulting chunks out with sctp_chunk_output().
 * The labels at the bottom unwind exactly what was acquired:
 * get_out drops the TCB lock, out_decr additionally drops the inp
 * reference, out_no_decr drops nothing (used when the case itself
 * already consumed the reference or freed the object).
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;
	struct sctp_iterator *it = NULL;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		return;
	}
	/*
	 * stopped_from is breadcrumb state recording how far the handler
	 * got before bailing out (0xa001..0xa006), for debugging.
	 */
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
		/* for the iterator timer, tmr->ep is really the iterator */
		it = (struct sctp_iterator *)inp;
		inp = NULL;
	}
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * A socket-less endpoint is being torn down; only the timer
		 * types needed to finish that teardown may still run.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* pin the tcb before we try to take its lock */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* timer was stopped while we were getting here */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		/* lock held now, the refcnt pin is no longer needed */
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			return;
		}
	}
	/* record in stopped what timeout occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		SCTP_STAT_INCR(sctps_timoiterator);
		sctp_iterator_timer(it);
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timeout */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		/*
		 * NOTE(review): cur_oerr/retcode are file-scope globals
		 * (defined just above this function) written by every
		 * concurrently firing T3 timer; cur_oerr is never read
		 * afterwards — presumably debugger breadcrumbs only.
		 */
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there are chunks on the sent queue
			 * but no send timers running, something is
			 * wrong... so we start a timer on the first chunk
			 * on the sent queue on whatever net it was sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timeout */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed-SACK timeout */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			int abort_flag;

			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
				sctp_sack_check(stcb, 0, 0, &abort_flag);

			/*
			 * EY if nr_sacks used then send an nr-sack , a sack
			 * otherwise
			 */
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
				sctp_send_nr_sack(stcb);
			else
				sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			/*
			 * NOTE(review): after the TAILQ_FOREACH completes,
			 * lnet is NULL, so the calls below receive a NULL
			 * net pointer — presumably the callees treat that
			 * as "pick a destination"; TODO confirm.
			 */
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* rotate the endpoint's cookie secret keys */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* shutdown took too long; abort the association */
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_DIRECTLY_NOCMPSET);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* drop the TCB lock taken above (if any) */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	/* drop the inp reference taken above (if any) */
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
}
1922 
/*
 * Arm the timer of the given type for this endpoint/association/destination.
 * Maps the type to the owning sctp_timer structure, computes the timeout
 * (converted to ticks) from the relevant RTO, sysctl, or endpoint setting,
 * and starts the OS callout with sctp_timeout_handler.  If the timer is
 * already pending it is left running unchanged.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int to_ticks;
	struct sctp_timer *tmr;

	/* every timer except the address work-queue timer requires an inp */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	to_ticks = 0;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			/*
			 * NOTE(review): for the iterator timer the 'inp'
			 * argument actually carries a struct sctp_iterator
			 * pointer -- confirm at call sites.
			 */
			struct sctp_iterator *it;

			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
			to_ticks = SCTP_ITERATOR_TICKS;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * usually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint8_t this_random;
			int cnt_of_unconf = 0;
			struct sctp_nets *lnet;

			/* count reachable-but-unconfirmed destinations */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf) {
				/*
				 * NOTE(review): lnet is forced to NULL here,
				 * so sctp_heartbeat_timer() must select the
				 * unconfirmed destination itself -- confirm.
				 */
				net = lnet = NULL;
				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
			}
			if (stcb->asoc.hb_random_idx > 3) {
				/* refill the 4-byte pool of jitter values */
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				memcpy(stcb->asoc.hb_random_values, &rndval,
				    sizeof(stcb->asoc.hb_random_values));
				stcb->asoc.hb_random_idx = 0;
			}
			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
			stcb->asoc.hb_random_idx++;
			stcb->asoc.hb_ect_randombit = 0;
			/*
			 * this_random will be 0 - 255 ms (it is a uint8_t);
			 * RTO is in ms.
			 */
			if ((stcb->asoc.hb_is_disabled) &&
			    (cnt_of_unconf == 0)) {
				return;
			}
			if (net) {
				int delay;

				delay = stcb->asoc.heart_beat_delay;
				/* any in-scope unconfirmed address: HB at once */
				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
						delay = 0;
					}
				}
				if (net->RTO == 0) {
					/* Never been checked */
					to_ticks = this_random + stcb->asoc.initial_rto + delay;
				} else {
					/* set rto_val to the ms */
					to_ticks = delay + net->RTO + this_random;
				}
			} else {
				if (cnt_of_unconf) {
					to_ticks = this_random + stcb->asoc.initial_rto;
				} else {
					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
				}
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &stcb->asoc.hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was complete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here, usually about 60
		 * minutes.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU, usually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (inp == NULL)) {
			return;
		}
		if (net == NULL) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_EARLYFR:
		{
			unsigned int msec;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			if (net->flight_size > net->cwnd) {
				/* no need to start */
				return;
			}
			SCTP_STAT_INCR(sctps_earlyfrstart);
			if (net->lastsa == 0) {
				/* Hmm no rtt estimate yet? */
				msec = stcb->asoc.initial_rto >> 2;
			} else {
				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
			}
			/* clamp to the sysctl minimum, then the hard floor */
			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
				if (msec < SCTP_MINFR_MSEC_FLOOR) {
					msec = SCTP_MINFR_MSEC_FLOOR;
				}
			}
			to_ticks = MSEC_TO_TICKS(msec);
			tmr = &net->fr_timer;
		}
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		return;
		/* NOTREACHED */
		break;
	};
	/* sanity: every case above must have produced a timer and timeout */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __FUNCTION__, t_type, to_ticks, tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2285 
/*
 * Stop a previously started timer of the given type.  The type is mapped
 * to the sctp_timer that owns it; because several types share a timer
 * structure, a timer currently armed for a DIFFERENT type is left running.
 * 'from' records the caller's location in tmr->stopped_from for debugging.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* every timer except the address work-queue timer requires an inp */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			/*
			 * NOTE(review): 'inp' carries a struct sctp_iterator
			 * pointer for this type -- confirm at call sites.
			 */
			struct sctp_iterator *it;

			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		/* keep the outstanding-send-timer count consistent */
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2457 
2458 uint32_t
2459 sctp_calculate_len(struct mbuf *m)
2460 {
2461 	uint32_t tlen = 0;
2462 	struct mbuf *at;
2463 
2464 	at = m;
2465 	while (at) {
2466 		tlen += SCTP_BUF_LEN(at);
2467 		at = SCTP_BUF_NEXT(at);
2468 	}
2469 	return (tlen);
2470 }
2471 
2472 void
2473 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2474     struct sctp_association *asoc, uint32_t mtu)
2475 {
2476 	/*
2477 	 * Reset the P-MTU size on this association, this involves changing
2478 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2479 	 * allow the DF flag to be cleared.
2480 	 */
2481 	struct sctp_tmit_chunk *chk;
2482 	unsigned int eff_mtu, ovh;
2483 
2484 #ifdef SCTP_PRINT_FOR_B_AND_M
2485 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2486 	    inp, asoc, mtu);
2487 #endif
2488 	asoc->smallest_mtu = mtu;
2489 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2490 		ovh = SCTP_MIN_OVERHEAD;
2491 	} else {
2492 		ovh = SCTP_MIN_V4_OVERHEAD;
2493 	}
2494 	eff_mtu = mtu - ovh;
2495 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2496 
2497 		if (chk->send_size > eff_mtu) {
2498 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2499 		}
2500 	}
2501 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2502 		if (chk->send_size > eff_mtu) {
2503 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2504 		}
2505 	}
2506 }
2507 
2508 
2509 /*
2510  * given an association and starting time of the current RTT period return
2511  * RTO in number of msecs net should point to the current network
2512  */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;	/* the raw RTT sample, in milliseconds */
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	/* (avoids a potentially misaligned direct access to *told) */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
		/* equal usec: the whole-second difference already covers it */
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		/* skip the update; derive RTO from the existing estimates */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/* keep the raw sample in net->rtt as well */
	net->rtt = o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			/* never let the variance collapse to zero */
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = smoothed RTT (descaled) + variance estimate, in ms */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	/* latch "satellite network" once the RTO crosses the threshold */
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2638 
2639 /*
2640  * return a pointer to a contiguous piece of data from the given mbuf chain
2641  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2642  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2643  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2644  */
2645 caddr_t
2646 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2647 {
2648 	uint32_t count;
2649 	uint8_t *ptr;
2650 
2651 	ptr = in_ptr;
2652 	if ((off < 0) || (len <= 0))
2653 		return (NULL);
2654 
2655 	/* find the desired start location */
2656 	while ((m != NULL) && (off > 0)) {
2657 		if (off < SCTP_BUF_LEN(m))
2658 			break;
2659 		off -= SCTP_BUF_LEN(m);
2660 		m = SCTP_BUF_NEXT(m);
2661 	}
2662 	if (m == NULL)
2663 		return (NULL);
2664 
2665 	/* is the current mbuf large enough (eg. contiguous)? */
2666 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2667 		return (mtod(m, caddr_t)+off);
2668 	} else {
2669 		/* else, it spans more than one mbuf, so save a temp copy... */
2670 		while ((m != NULL) && (len > 0)) {
2671 			count = min(SCTP_BUF_LEN(m) - off, len);
2672 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2673 			len -= count;
2674 			ptr += count;
2675 			off = 0;
2676 			m = SCTP_BUF_NEXT(m);
2677 		}
2678 		if ((m == NULL) && (len > 0))
2679 			return (NULL);
2680 		else
2681 			return ((caddr_t)in_ptr);
2682 	}
2683 }
2684 
2685 
2686 
2687 struct sctp_paramhdr *
2688 sctp_get_next_param(struct mbuf *m,
2689     int offset,
2690     struct sctp_paramhdr *pull,
2691     int pull_limit)
2692 {
2693 	/* This just provides a typed signature to Peter's Pull routine */
2694 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2695 	    (uint8_t *) pull));
2696 }
2697 
2698 
2699 int
2700 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2701 {
2702 	/*
2703 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2704 	 * padlen is > 3 this routine will fail.
2705 	 */
2706 	uint8_t *dp;
2707 	int i;
2708 
2709 	if (padlen > 3) {
2710 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2711 		return (ENOBUFS);
2712 	}
2713 	if (padlen <= M_TRAILINGSPACE(m)) {
2714 		/*
2715 		 * The easy way. We hope the majority of the time we hit
2716 		 * here :)
2717 		 */
2718 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2719 		SCTP_BUF_LEN(m) += padlen;
2720 	} else {
2721 		/* Hard way we must grow the mbuf */
2722 		struct mbuf *tmp;
2723 
2724 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2725 		if (tmp == NULL) {
2726 			/* Out of space GAK! we are in big trouble. */
2727 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2728 			return (ENOSPC);
2729 		}
2730 		/* setup and insert in middle */
2731 		SCTP_BUF_LEN(tmp) = padlen;
2732 		SCTP_BUF_NEXT(tmp) = NULL;
2733 		SCTP_BUF_NEXT(m) = tmp;
2734 		dp = mtod(tmp, uint8_t *);
2735 	}
2736 	/* zero out the pad */
2737 	for (i = 0; i < padlen; i++) {
2738 		*dp = 0;
2739 		dp++;
2740 	}
2741 	return (0);
2742 }
2743 
2744 int
2745 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2746 {
2747 	/* find the last mbuf in chain and pad it */
2748 	struct mbuf *m_at;
2749 
2750 	m_at = m;
2751 	if (last_mbuf) {
2752 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2753 	} else {
2754 		while (m_at) {
2755 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2756 				return (sctp_add_pad_tombuf(m_at, padval));
2757 			}
2758 			m_at = SCTP_BUF_NEXT(m_at);
2759 		}
2760 	}
2761 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2762 	return (EFAULT);
2763 }
2764 
/* Counter: bumped each time sctp_notify_assoc_change() wakes sleepers. */
int sctp_asoc_change_wake = 0;
2766 
/*
 * Build and queue an SCTP_ASSOC_CHANGE notification for the application.
 * For TCP-model (and connected UDP-model) sockets, a COMM_LOST or
 * CANT_STR_ASSOC event additionally sets so_error (ECONNREFUSED while
 * still in COOKIE_WAIT, ECONNRESET otherwise) and wakes any sleepers.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Drop the TCB lock to take the socket lock in the
			 * correct order; hold a refcount so the association
			 * cannot be freed underneath us meanwhile.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* socket closed while we juggled locks */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* fill in the sctp_assoc_change structure for the user */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* wrap the notification in a read-queue entry for the socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* same lock-ordering dance as above */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2883 
2884 static void
2885 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2886     struct sockaddr *sa, uint32_t error)
2887 {
2888 	struct mbuf *m_notify;
2889 	struct sctp_paddr_change *spc;
2890 	struct sctp_queued_to_read *control;
2891 
2892 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2893 		/* event not enabled */
2894 		return;
2895 	}
2896 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2897 	if (m_notify == NULL)
2898 		return;
2899 	SCTP_BUF_LEN(m_notify) = 0;
2900 	spc = mtod(m_notify, struct sctp_paddr_change *);
2901 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2902 	spc->spc_flags = 0;
2903 	spc->spc_length = sizeof(struct sctp_paddr_change);
2904 	switch (sa->sa_family) {
2905 	case AF_INET:
2906 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2907 		break;
2908 #ifdef INET6
2909 	case AF_INET6:
2910 		{
2911 			struct sockaddr_in6 *sin6;
2912 
2913 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2914 
2915 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2916 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2917 				if (sin6->sin6_scope_id == 0) {
2918 					/* recover scope_id for user */
2919 					(void)sa6_recoverscope(sin6);
2920 				} else {
2921 					/* clear embedded scope_id for user */
2922 					in6_clearscope(&sin6->sin6_addr);
2923 				}
2924 			}
2925 			break;
2926 		}
2927 #endif
2928 	default:
2929 		/* TSNH */
2930 		break;
2931 	}
2932 	spc->spc_state = state;
2933 	spc->spc_error = error;
2934 	spc->spc_assoc_id = sctp_get_associd(stcb);
2935 
2936 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2937 	SCTP_BUF_NEXT(m_notify) = NULL;
2938 
2939 	/* append to socket */
2940 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2941 	    0, 0, 0, 0, 0, 0,
2942 	    m_notify);
2943 	if (control == NULL) {
2944 		/* no memory */
2945 		sctp_m_freem(m_notify);
2946 		return;
2947 	}
2948 	control->length = SCTP_BUF_LEN(m_notify);
2949 	control->spec_flags = M_NOTIFICATION;
2950 	/* not that we need this */
2951 	control->tail_mbuf = m_notify;
2952 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2953 	    control,
2954 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
2955 }
2956 
2957 
/*
 * Queue an SCTP_SEND_FAILED notification for a chunk that was on the
 * sent/send queue.  The user data mbuf chain is stolen from 'chk' and
 * chained behind the notification header, so the caller must not free
 * chk->data afterwards (it is set to NULL here).
 *
 * error is SCTP_NOTIFY_DATAGRAM_UNSENT or SCTP_NOTIFY_DATAGRAM_SENT and
 * selects the ssf_flags value reported to the application.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* The mbuf holds only the header; user data is chained below. */
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * NOTE(review): the data-chunk header size is subtracted here
	 * unconditionally, but the actual m_adj() trim below only happens
	 * when chk->send_size >= sizeof(struct sctp_data_chunk); if it is
	 * smaller, ssf_length under-reports — confirm this is intended.
	 */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	/* Chain the original user data behind the notification header. */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3037 
3038 
/*
 * Queue an SCTP_SEND_FAILED notification for a message still sitting on a
 * stream output queue (never chunked).  Like sctp_notify_send_failed(),
 * the pending data mbuf chain is stolen from 'sp' (sp->data is cleared),
 * so the caller must not free it afterwards.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* total reported length = header plus all pending user data */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* part of the message was already chunked off */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* chain the unsent user data behind the notification header */
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, so_locked);
}
3111 
3112 
3113 
3114 static void
3115 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3116     uint32_t error)
3117 {
3118 	struct mbuf *m_notify;
3119 	struct sctp_adaptation_event *sai;
3120 	struct sctp_queued_to_read *control;
3121 
3122 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3123 		/* event not enabled */
3124 		return;
3125 	}
3126 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3127 	if (m_notify == NULL)
3128 		/* no space left */
3129 		return;
3130 	SCTP_BUF_LEN(m_notify) = 0;
3131 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3132 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3133 	sai->sai_flags = 0;
3134 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3135 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3136 	sai->sai_assoc_id = sctp_get_associd(stcb);
3137 
3138 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3139 	SCTP_BUF_NEXT(m_notify) = NULL;
3140 
3141 	/* append to socket */
3142 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3143 	    0, 0, 0, 0, 0, 0,
3144 	    m_notify);
3145 	if (control == NULL) {
3146 		/* no memory */
3147 		sctp_m_freem(m_notify);
3148 		return;
3149 	}
3150 	control->length = SCTP_BUF_LEN(m_notify);
3151 	control->spec_flags = M_NOTIFICATION;
3152 	/* not that we need this */
3153 	control->tail_mbuf = m_notify;
3154 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3155 	    control,
3156 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3157 }
3158 
3159 /* This always must be called with the read-queue LOCKED in the INP */
3160 void
3161 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3162     int nolock, uint32_t val)
3163 {
3164 	struct mbuf *m_notify;
3165 	struct sctp_pdapi_event *pdapi;
3166 	struct sctp_queued_to_read *control;
3167 	struct sockbuf *sb;
3168 
3169 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3170 		/* event not enabled */
3171 		return;
3172 	}
3173 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3174 	if (m_notify == NULL)
3175 		/* no space left */
3176 		return;
3177 	SCTP_BUF_LEN(m_notify) = 0;
3178 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3179 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3180 	pdapi->pdapi_flags = 0;
3181 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3182 	pdapi->pdapi_indication = error;
3183 	pdapi->pdapi_stream = (val >> 16);
3184 	pdapi->pdapi_seq = (val & 0x0000ffff);
3185 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3186 
3187 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3188 	SCTP_BUF_NEXT(m_notify) = NULL;
3189 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3190 	    0, 0, 0, 0, 0, 0,
3191 	    m_notify);
3192 	if (control == NULL) {
3193 		/* no memory */
3194 		sctp_m_freem(m_notify);
3195 		return;
3196 	}
3197 	control->spec_flags = M_NOTIFICATION;
3198 	control->length = SCTP_BUF_LEN(m_notify);
3199 	/* not that we need this */
3200 	control->tail_mbuf = m_notify;
3201 	control->held_length = 0;
3202 	control->length = 0;
3203 	if (nolock == 0) {
3204 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
3205 	}
3206 	sb = &stcb->sctp_socket->so_rcv;
3207 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3208 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3209 	}
3210 	sctp_sballoc(stcb, sb, m_notify);
3211 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3212 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3213 	}
3214 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3215 	control->end_added = 1;
3216 	if (stcb->asoc.control_pdapi)
3217 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3218 	else {
3219 		/* we really should not see this case */
3220 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3221 	}
3222 	if (nolock == 0) {
3223 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
3224 	}
3225 	if (stcb->sctp_ep && stcb->sctp_socket) {
3226 		/* This should always be the case */
3227 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3228 	}
3229 }
3230 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification.  For TCP-model (and
 * connected one-to-many) sockets, first mark the socket unable to send
 * (socantsendmore) so the application sees the shutdown on write as well.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock-order dance: hold a refcount on the assoc, drop the
		 * TCB lock, take the socket lock, then re-take the TCB
		 * lock before releasing the reference.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket was closed while we dropped the TCB lock */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
}
3298 
3299 static void
3300 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3301     int so_locked
3302 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3303     SCTP_UNUSED
3304 #endif
3305 )
3306 {
3307 	struct mbuf *m_notify;
3308 	struct sctp_sender_dry_event *event;
3309 	struct sctp_queued_to_read *control;
3310 
3311 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3312 		/* event not enabled */
3313 		return;
3314 	}
3315 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3316 	if (m_notify == NULL) {
3317 		/* no space left */
3318 		return;
3319 	}
3320 	SCTP_BUF_LEN(m_notify) = 0;
3321 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3322 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3323 	event->sender_dry_flags = 0;
3324 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3325 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3326 
3327 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3328 	SCTP_BUF_NEXT(m_notify) = NULL;
3329 
3330 	/* append to socket */
3331 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3332 	    0, 0, 0, 0, 0, 0, m_notify);
3333 	if (control == NULL) {
3334 		/* no memory */
3335 		sctp_m_freem(m_notify);
3336 		return;
3337 	}
3338 	control->length = SCTP_BUF_LEN(m_notify);
3339 	control->spec_flags = M_NOTIFICATION;
3340 	/* not that we need this */
3341 	control->tail_mbuf = m_notify;
3342 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3343 	    &stcb->sctp_socket->so_rcv, 1, so_locked);
3344 }
3345 
3346 
3347 static void
3348 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3349 {
3350 	struct mbuf *m_notify;
3351 	struct sctp_queued_to_read *control;
3352 	struct sctp_stream_reset_event *strreset;
3353 	int len;
3354 
3355 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3356 		/* event not enabled */
3357 		return;
3358 	}
3359 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3360 	if (m_notify == NULL)
3361 		/* no space left */
3362 		return;
3363 	SCTP_BUF_LEN(m_notify) = 0;
3364 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3365 	if (len > M_TRAILINGSPACE(m_notify)) {
3366 		/* never enough room */
3367 		sctp_m_freem(m_notify);
3368 		return;
3369 	}
3370 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3371 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3372 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3373 	strreset->strreset_length = len;
3374 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3375 	strreset->strreset_list[0] = number_entries;
3376 
3377 	SCTP_BUF_LEN(m_notify) = len;
3378 	SCTP_BUF_NEXT(m_notify) = NULL;
3379 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3380 		/* no space */
3381 		sctp_m_freem(m_notify);
3382 		return;
3383 	}
3384 	/* append to socket */
3385 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3386 	    0, 0, 0, 0, 0, 0,
3387 	    m_notify);
3388 	if (control == NULL) {
3389 		/* no memory */
3390 		sctp_m_freem(m_notify);
3391 		return;
3392 	}
3393 	control->spec_flags = M_NOTIFICATION;
3394 	control->length = SCTP_BUF_LEN(m_notify);
3395 	/* not that we need this */
3396 	control->tail_mbuf = m_notify;
3397 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3398 	    control,
3399 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3400 }
3401 
3402 
3403 static void
3404 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3405     int number_entries, uint16_t * list, int flag)
3406 {
3407 	struct mbuf *m_notify;
3408 	struct sctp_queued_to_read *control;
3409 	struct sctp_stream_reset_event *strreset;
3410 	int len;
3411 
3412 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3413 		/* event not enabled */
3414 		return;
3415 	}
3416 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3417 	if (m_notify == NULL)
3418 		/* no space left */
3419 		return;
3420 	SCTP_BUF_LEN(m_notify) = 0;
3421 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3422 	if (len > M_TRAILINGSPACE(m_notify)) {
3423 		/* never enough room */
3424 		sctp_m_freem(m_notify);
3425 		return;
3426 	}
3427 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3428 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3429 	if (number_entries == 0) {
3430 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3431 	} else {
3432 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3433 	}
3434 	strreset->strreset_length = len;
3435 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3436 	if (number_entries) {
3437 		int i;
3438 
3439 		for (i = 0; i < number_entries; i++) {
3440 			strreset->strreset_list[i] = ntohs(list[i]);
3441 		}
3442 	}
3443 	SCTP_BUF_LEN(m_notify) = len;
3444 	SCTP_BUF_NEXT(m_notify) = NULL;
3445 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3446 		/* no space */
3447 		sctp_m_freem(m_notify);
3448 		return;
3449 	}
3450 	/* append to socket */
3451 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3452 	    0, 0, 0, 0, 0, 0,
3453 	    m_notify);
3454 	if (control == NULL) {
3455 		/* no memory */
3456 		sctp_m_freem(m_notify);
3457 		return;
3458 	}
3459 	control->spec_flags = M_NOTIFICATION;
3460 	control->length = SCTP_BUF_LEN(m_notify);
3461 	/* not that we need this */
3462 	control->tail_mbuf = m_notify;
3463 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3464 	    control,
3465 	    &stcb->sctp_socket->so_rcv, 1, SCTP_SO_NOT_LOCKED);
3466 }
3467 
3468 
3469 void
3470 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3471     uint32_t error, void *data, int so_locked
3472 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3473     SCTP_UNUSED
3474 #endif
3475 )
3476 {
3477 	if ((stcb == NULL) ||
3478 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3479 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3480 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3481 		/* If the socket is gone we are out of here */
3482 		return;
3483 	}
3484 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3485 		return;
3486 	}
3487 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3488 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3489 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3490 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3491 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3492 			/* Don't report these in front states */
3493 			return;
3494 		}
3495 	}
3496 	switch (notification) {
3497 	case SCTP_NOTIFY_ASSOC_UP:
3498 		if (stcb->asoc.assoc_up_sent == 0) {
3499 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3500 			stcb->asoc.assoc_up_sent = 1;
3501 		}
3502 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3503 			sctp_notify_adaptation_layer(stcb, error);
3504 		}
3505 		if (stcb->asoc.peer_supports_auth == 0) {
3506 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3507 			    NULL, so_locked);
3508 		}
3509 		break;
3510 	case SCTP_NOTIFY_ASSOC_DOWN:
3511 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3512 		break;
3513 	case SCTP_NOTIFY_INTERFACE_DOWN:
3514 		{
3515 			struct sctp_nets *net;
3516 
3517 			net = (struct sctp_nets *)data;
3518 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3519 			    (struct sockaddr *)&net->ro._l_addr, error);
3520 			break;
3521 		}
3522 	case SCTP_NOTIFY_INTERFACE_UP:
3523 		{
3524 			struct sctp_nets *net;
3525 
3526 			net = (struct sctp_nets *)data;
3527 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3528 			    (struct sockaddr *)&net->ro._l_addr, error);
3529 			break;
3530 		}
3531 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3532 		{
3533 			struct sctp_nets *net;
3534 
3535 			net = (struct sctp_nets *)data;
3536 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3537 			    (struct sockaddr *)&net->ro._l_addr, error);
3538 			break;
3539 		}
3540 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3541 		sctp_notify_send_failed2(stcb, error,
3542 		    (struct sctp_stream_queue_pending *)data, so_locked);
3543 		break;
3544 	case SCTP_NOTIFY_DG_FAIL:
3545 		sctp_notify_send_failed(stcb, error,
3546 		    (struct sctp_tmit_chunk *)data, so_locked);
3547 		break;
3548 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3549 		{
3550 			uint32_t val;
3551 
3552 			val = *((uint32_t *) data);
3553 
3554 			sctp_notify_partial_delivery_indication(stcb, error, 0, val);
3555 		}
3556 		break;
3557 	case SCTP_NOTIFY_STRDATA_ERR:
3558 		break;
3559 	case SCTP_NOTIFY_ASSOC_ABORTED:
3560 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3561 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3562 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3563 		} else {
3564 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3565 		}
3566 		break;
3567 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3568 		break;
3569 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3570 		break;
3571 	case SCTP_NOTIFY_ASSOC_RESTART:
3572 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3573 		if (stcb->asoc.peer_supports_auth == 0) {
3574 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3575 			    NULL, so_locked);
3576 		}
3577 		break;
3578 	case SCTP_NOTIFY_HB_RESP:
3579 		break;
3580 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3581 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3582 		break;
3583 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3584 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3585 		break;
3586 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3587 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3588 		break;
3589 
3590 	case SCTP_NOTIFY_STR_RESET_SEND:
3591 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3592 		break;
3593 	case SCTP_NOTIFY_STR_RESET_RECV:
3594 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3595 		break;
3596 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3597 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3598 		break;
3599 
3600 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3601 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3602 		break;
3603 
3604 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3605 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3606 		    error);
3607 		break;
3608 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3609 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3610 		    error);
3611 		break;
3612 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3613 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3614 		    error);
3615 		break;
3616 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3617 		break;
3618 	case SCTP_NOTIFY_ASCONF_FAILED:
3619 		break;
3620 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3621 		sctp_notify_shutdown_event(stcb);
3622 		break;
3623 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3624 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3625 		    (uint16_t) (uintptr_t) data,
3626 		    so_locked);
3627 		break;
3628 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3629 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3630 		    (uint16_t) (uintptr_t) data,
3631 		    so_locked);
3632 		break;
3633 	case SCTP_NOTIFY_NO_PEER_AUTH:
3634 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3635 		    (uint16_t) (uintptr_t) data,
3636 		    so_locked);
3637 		break;
3638 	case SCTP_NOTIFY_SENDER_DRY:
3639 		sctp_notify_sender_dry_event(stcb, so_locked);
3640 		break;
3641 	default:
3642 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3643 		    __FUNCTION__, notification, notification);
3644 		break;
3645 	}			/* end switch */
3646 }
3647 
3648 void
3649 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3650 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3651     SCTP_UNUSED
3652 #endif
3653 )
3654 {
3655 	struct sctp_association *asoc;
3656 	struct sctp_stream_out *outs;
3657 	struct sctp_tmit_chunk *chk;
3658 	struct sctp_stream_queue_pending *sp;
3659 	int i;
3660 
3661 	asoc = &stcb->asoc;
3662 
3663 	if (stcb == NULL) {
3664 		return;
3665 	}
3666 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3667 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3668 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3669 		return;
3670 	}
3671 	/* now through all the gunk freeing chunks */
3672 	if (holds_lock == 0) {
3673 		SCTP_TCB_SEND_LOCK(stcb);
3674 	}
3675 	/* sent queue SHOULD be empty */
3676 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3677 		chk = TAILQ_FIRST(&asoc->sent_queue);
3678 		while (chk) {
3679 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3680 			asoc->sent_queue_cnt--;
3681 			if (chk->data != NULL) {
3682 				sctp_free_bufspace(stcb, asoc, chk, 1);
3683 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3684 				    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3685 				sctp_m_freem(chk->data);
3686 				chk->data = NULL;
3687 			}
3688 			sctp_free_a_chunk(stcb, chk);
3689 			/* sa_ignore FREED_MEMORY */
3690 			chk = TAILQ_FIRST(&asoc->sent_queue);
3691 		}
3692 	}
3693 	/* pending send queue SHOULD be empty */
3694 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3695 		chk = TAILQ_FIRST(&asoc->send_queue);
3696 		while (chk) {
3697 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3698 			asoc->send_queue_cnt--;
3699 			if (chk->data != NULL) {
3700 				sctp_free_bufspace(stcb, asoc, chk, 1);
3701 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3702 				    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3703 				sctp_m_freem(chk->data);
3704 				chk->data = NULL;
3705 			}
3706 			sctp_free_a_chunk(stcb, chk);
3707 			/* sa_ignore FREED_MEMORY */
3708 			chk = TAILQ_FIRST(&asoc->send_queue);
3709 		}
3710 	}
3711 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3712 		/* For each stream */
3713 		outs = &stcb->asoc.strmout[i];
3714 		/* clean up any sends there */
3715 		stcb->asoc.locked_on_sending = NULL;
3716 		sp = TAILQ_FIRST(&outs->outqueue);
3717 		while (sp) {
3718 			stcb->asoc.stream_queue_cnt--;
3719 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3720 			sctp_free_spbufspace(stcb, asoc, sp);
3721 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3722 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3723 			if (sp->data) {
3724 				sctp_m_freem(sp->data);
3725 				sp->data = NULL;
3726 			}
3727 			if (sp->net)
3728 				sctp_free_remote_addr(sp->net);
3729 			sp->net = NULL;
3730 			/* Free the chunk */
3731 			sctp_free_a_strmoq(stcb, sp);
3732 			/* sa_ignore FREED_MEMORY */
3733 			sp = TAILQ_FIRST(&outs->outqueue);
3734 		}
3735 	}
3736 
3737 	if (holds_lock == 0) {
3738 		SCTP_TCB_SEND_UNLOCK(stcb);
3739 	}
3740 }
3741 
3742 void
3743 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3744 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3745     SCTP_UNUSED
3746 #endif
3747 )
3748 {
3749 
3750 	if (stcb == NULL) {
3751 		return;
3752 	}
3753 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3754 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3755 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3756 		return;
3757 	}
3758 	/* Tell them we lost the asoc */
3759 	sctp_report_all_outbound(stcb, 1, so_locked);
3760 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3761 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3762 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3763 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3764 	}
3765 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3766 }
3767 
/*
 * Abort an association in response to an incoming packet: notify the ULP
 * (if a TCB exists), send an ABORT back to the peer, then free the TCB
 * or, when there is no TCB and the socket is gone, the inp itself.
 *
 * When stcb is NULL the ABORT is sent with vtag 0 (out of the blue).
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: hold an assoc reference, drop the TCB
		 * lock, take the socket lock, then re-lock the TCB before
		 * releasing the reference and freeing the assoc.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		/* No TCB: free the inp too if its socket is already gone. */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3813 
3814 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Dump the per-association TSN in/out logs for debugging.
 *
 * NOTE(review): the body is guarded by "#ifdef NOSIY_PRINTS", which looks
 * like a typo of NOISY_PRINTS; as written, defining NOISY_PRINTS will NOT
 * enable this code — confirm whether that is intentional.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* ring buffer: when wrapped, print the older tail half first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3875 
3876 #endif
3877 
/*
 * Abort an existing association from the local side: notify the ULP
 * (unless the socket is gone), send an ABORT chunk to the peer, update
 * statistics and free the association.
 *
 * op_err is the cause chain to attach to the outgoing ABORT (may be NULL,
 * passed through to sctp_send_abort_tcb()).
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* NOTE(review): vtag is read but not otherwise used below. */
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: hold an assoc reference, drop the TCB lock,
	 * take the socket lock, then re-lock the TCB before releasing the
	 * reference — only needed when the caller did not lock the socket.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3943 
3944 void
3945 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3946     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3947 {
3948 	struct sctp_chunkhdr *ch, chunk_buf;
3949 	unsigned int chk_length;
3950 
3951 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3952 	/* Generate a TO address for future reference */
3953 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3954 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3955 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3956 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3957 		}
3958 	}
3959 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3960 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3961 	while (ch != NULL) {
3962 		chk_length = ntohs(ch->chunk_length);
3963 		if (chk_length < sizeof(*ch)) {
3964 			/* break to abort land */
3965 			break;
3966 		}
3967 		switch (ch->chunk_type) {
3968 		case SCTP_COOKIE_ECHO:
3969 			/* We hit here only if the assoc is being freed */
3970 			return;
3971 		case SCTP_PACKET_DROPPED:
3972 			/* we don't respond to pkt-dropped */
3973 			return;
3974 		case SCTP_ABORT_ASSOCIATION:
3975 			/* we don't respond with an ABORT to an ABORT */
3976 			return;
3977 		case SCTP_SHUTDOWN_COMPLETE:
3978 			/*
3979 			 * we ignore it since we are not waiting for it and
3980 			 * peer is gone
3981 			 */
3982 			return;
3983 		case SCTP_SHUTDOWN_ACK:
3984 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
3985 			return;
3986 		default:
3987 			break;
3988 		}
3989 		offset += SCTP_SIZE32(chk_length);
3990 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3991 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3992 	}
3993 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
3994 }
3995 
3996 /*
3997  * check the inbound datagram to make sure there is not an abort inside it,
3998  * if there is return 1, else return 0.
3999  */
4000 int
4001 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4002 {
4003 	struct sctp_chunkhdr *ch;
4004 	struct sctp_init_chunk *init_chk, chunk_buf;
4005 	int offset;
4006 	unsigned int chk_length;
4007 
4008 	offset = iphlen + sizeof(struct sctphdr);
4009 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4010 	    (uint8_t *) & chunk_buf);
4011 	while (ch != NULL) {
4012 		chk_length = ntohs(ch->chunk_length);
4013 		if (chk_length < sizeof(*ch)) {
4014 			/* packet is probably corrupt */
4015 			break;
4016 		}
4017 		/* we seem to be ok, is it an abort? */
4018 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4019 			/* yep, tell them */
4020 			return (1);
4021 		}
4022 		if (ch->chunk_type == SCTP_INITIATION) {
4023 			/* need to update the Vtag */
4024 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4025 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4026 			if (init_chk != NULL) {
4027 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4028 			}
4029 		}
4030 		/* Nope, move to the next chunk */
4031 		offset += SCTP_SIZE32(chk_length);
4032 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4033 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4034 	}
4035 	return (0);
4036 }
4037 
4038 /*
4039  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4040  * set (i.e. it's 0) so, create this function to compare link local scopes
4041  */
4042 #ifdef INET6
4043 uint32_t
4044 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4045 {
4046 	struct sockaddr_in6 a, b;
4047 
4048 	/* save copies */
4049 	a = *addr1;
4050 	b = *addr2;
4051 
4052 	if (a.sin6_scope_id == 0)
4053 		if (sa6_recoverscope(&a)) {
4054 			/* can't get scope, so can't match */
4055 			return (0);
4056 		}
4057 	if (b.sin6_scope_id == 0)
4058 		if (sa6_recoverscope(&b)) {
4059 			/* can't get scope, so can't match */
4060 			return (0);
4061 		}
4062 	if (a.sin6_scope_id != b.sin6_scope_id)
4063 		return (0);
4064 
4065 	return (1);
4066 }
4067 
4068 /*
4069  * returns a sockaddr_in6 with embedded scope recovered and removed
4070  */
4071 struct sockaddr_in6 *
4072 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4073 {
4074 	/* check and strip embedded scope junk */
4075 	if (addr->sin6_family == AF_INET6) {
4076 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4077 			if (addr->sin6_scope_id == 0) {
4078 				*store = *addr;
4079 				if (!sa6_recoverscope(store)) {
4080 					/* use the recovered scope */
4081 					addr = store;
4082 				}
4083 			} else {
4084 				/* else, return the original "to" addr */
4085 				in6_clearscope(&addr->sin6_addr);
4086 			}
4087 		}
4088 	}
4089 	return (addr);
4090 }
4091 
4092 #endif
4093 
4094 /*
4095  * are the two addresses the same?  currently a "scopeless" check returns: 1
4096  * if same, 0 if not
4097  */
4098 int
4099 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4100 {
4101 
4102 	/* must be valid */
4103 	if (sa1 == NULL || sa2 == NULL)
4104 		return (0);
4105 
4106 	/* must be the same family */
4107 	if (sa1->sa_family != sa2->sa_family)
4108 		return (0);
4109 
4110 	switch (sa1->sa_family) {
4111 #ifdef INET6
4112 	case AF_INET6:
4113 		{
4114 			/* IPv6 addresses */
4115 			struct sockaddr_in6 *sin6_1, *sin6_2;
4116 
4117 			sin6_1 = (struct sockaddr_in6 *)sa1;
4118 			sin6_2 = (struct sockaddr_in6 *)sa2;
4119 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4120 			    sin6_2));
4121 		}
4122 #endif
4123 	case AF_INET:
4124 		{
4125 			/* IPv4 addresses */
4126 			struct sockaddr_in *sin_1, *sin_2;
4127 
4128 			sin_1 = (struct sockaddr_in *)sa1;
4129 			sin_2 = (struct sockaddr_in *)sa2;
4130 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4131 		}
4132 	default:
4133 		/* we don't do these... */
4134 		return (0);
4135 	}
4136 }
4137 
4138 void
4139 sctp_print_address(struct sockaddr *sa)
4140 {
4141 #ifdef INET6
4142 	char ip6buf[INET6_ADDRSTRLEN];
4143 
4144 	ip6buf[0] = 0;
4145 #endif
4146 
4147 	switch (sa->sa_family) {
4148 #ifdef INET6
4149 	case AF_INET6:
4150 		{
4151 			struct sockaddr_in6 *sin6;
4152 
4153 			sin6 = (struct sockaddr_in6 *)sa;
4154 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4155 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4156 			    ntohs(sin6->sin6_port),
4157 			    sin6->sin6_scope_id);
4158 			break;
4159 		}
4160 #endif
4161 	case AF_INET:
4162 		{
4163 			struct sockaddr_in *sin;
4164 			unsigned char *p;
4165 
4166 			sin = (struct sockaddr_in *)sa;
4167 			p = (unsigned char *)&sin->sin_addr;
4168 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4169 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4170 			break;
4171 		}
4172 	default:
4173 		SCTP_PRINTF("?\n");
4174 		break;
4175 	}
4176 }
4177 
4178 void
4179 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4180 {
4181 	switch (iph->ip_v) {
4182 		case IPVERSION:
4183 		{
4184 			struct sockaddr_in lsa, fsa;
4185 
4186 			bzero(&lsa, sizeof(lsa));
4187 			lsa.sin_len = sizeof(lsa);
4188 			lsa.sin_family = AF_INET;
4189 			lsa.sin_addr = iph->ip_src;
4190 			lsa.sin_port = sh->src_port;
4191 			bzero(&fsa, sizeof(fsa));
4192 			fsa.sin_len = sizeof(fsa);
4193 			fsa.sin_family = AF_INET;
4194 			fsa.sin_addr = iph->ip_dst;
4195 			fsa.sin_port = sh->dest_port;
4196 			SCTP_PRINTF("src: ");
4197 			sctp_print_address((struct sockaddr *)&lsa);
4198 			SCTP_PRINTF("dest: ");
4199 			sctp_print_address((struct sockaddr *)&fsa);
4200 			break;
4201 		}
4202 #ifdef INET6
4203 	case IPV6_VERSION >> 4:
4204 		{
4205 			struct ip6_hdr *ip6;
4206 			struct sockaddr_in6 lsa6, fsa6;
4207 
4208 			ip6 = (struct ip6_hdr *)iph;
4209 			bzero(&lsa6, sizeof(lsa6));
4210 			lsa6.sin6_len = sizeof(lsa6);
4211 			lsa6.sin6_family = AF_INET6;
4212 			lsa6.sin6_addr = ip6->ip6_src;
4213 			lsa6.sin6_port = sh->src_port;
4214 			bzero(&fsa6, sizeof(fsa6));
4215 			fsa6.sin6_len = sizeof(fsa6);
4216 			fsa6.sin6_family = AF_INET6;
4217 			fsa6.sin6_addr = ip6->ip6_dst;
4218 			fsa6.sin6_port = sh->dest_port;
4219 			SCTP_PRINTF("src: ");
4220 			sctp_print_address((struct sockaddr *)&lsa6);
4221 			SCTP_PRINTF("dest: ");
4222 			sctp_print_address((struct sockaddr *)&fsa6);
4223 			break;
4224 		}
4225 #endif
4226 	default:
4227 		/* TSNH */
4228 		break;
4229 	}
4230 }
4231 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * Go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.  Used when an
	 * association is migrated to its own socket (peeloff / accept):
	 * queued-but-unread data must follow the association.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/*
	 * Lock the old socket's receive buffer against concurrent readers
	 * before detaching any data from it.
	 */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off everything destined for our target stcb */
	while (control) {
		/* save successor: we may unlink 'control' below */
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit the old socket buffer for every mbuf moved */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit the new socket buffer for every mbuf moved */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4313 
4314 void
4315 sctp_add_to_readq(struct sctp_inpcb *inp,
4316     struct sctp_tcb *stcb,
4317     struct sctp_queued_to_read *control,
4318     struct sockbuf *sb,
4319     int end,
4320     int so_locked
4321 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4322     SCTP_UNUSED
4323 #endif
4324 )
4325 {
4326 	/*
4327 	 * Here we must place the control on the end of the socket read
4328 	 * queue AND increment sb_cc so that select will work properly on
4329 	 * read.
4330 	 */
4331 	struct mbuf *m, *prev = NULL;
4332 
4333 	if (inp == NULL) {
4334 		/* Gak, TSNH!! */
4335 #ifdef INVARIANTS
4336 		panic("Gak, inp NULL on add_to_readq");
4337 #endif
4338 		return;
4339 	}
4340 	SCTP_INP_READ_LOCK(inp);
4341 	if (!(control->spec_flags & M_NOTIFICATION)) {
4342 		atomic_add_int(&inp->total_recvs, 1);
4343 		if (!control->do_not_ref_stcb) {
4344 			atomic_add_int(&stcb->total_recvs, 1);
4345 		}
4346 	}
4347 	m = control->data;
4348 	control->held_length = 0;
4349 	control->length = 0;
4350 	while (m) {
4351 		if (SCTP_BUF_LEN(m) == 0) {
4352 			/* Skip mbufs with NO length */
4353 			if (prev == NULL) {
4354 				/* First one */
4355 				control->data = sctp_m_free(m);
4356 				m = control->data;
4357 			} else {
4358 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4359 				m = SCTP_BUF_NEXT(prev);
4360 			}
4361 			if (m == NULL) {
4362 				control->tail_mbuf = prev;;
4363 			}
4364 			continue;
4365 		}
4366 		prev = m;
4367 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4368 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4369 		}
4370 		sctp_sballoc(stcb, sb, m);
4371 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4372 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4373 		}
4374 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4375 		m = SCTP_BUF_NEXT(m);
4376 	}
4377 	if (prev != NULL) {
4378 		control->tail_mbuf = prev;
4379 	} else {
4380 		/* Everything got collapsed out?? */
4381 		return;
4382 	}
4383 	if (end) {
4384 		control->end_added = 1;
4385 	}
4386 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4387 	SCTP_INP_READ_UNLOCK(inp);
4388 	if (inp && inp->sctp_socket) {
4389 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4390 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4391 		} else {
4392 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4393 			struct socket *so;
4394 
4395 			so = SCTP_INP_SO(inp);
4396 			if (!so_locked) {
4397 				atomic_add_int(&stcb->asoc.refcnt, 1);
4398 				SCTP_TCB_UNLOCK(stcb);
4399 				SCTP_SOCKET_LOCK(so, 1);
4400 				SCTP_TCB_LOCK(stcb);
4401 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4402 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4403 					SCTP_SOCKET_UNLOCK(so, 1);
4404 					return;
4405 				}
4406 			}
4407 #endif
4408 			sctp_sorwakeup(inp, inp->sctp_socket);
4409 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4410 			if (!so_locked) {
4411 				SCTP_SOCKET_UNLOCK(so, 1);
4412 			}
4413 #endif
4414 		}
4415 	}
4416 }
4417 
4418 
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 *
	 * Returns 0 on success, -1 if the control is missing, already
	 * complete, or m is empty.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common error exit: release the read lock if we took it */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* prune zero-length mbufs, charge the rest to the socket buffer */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			/*
			 * NOTE(review): this path dereferences stcb without a
			 * NULL check, yet stcb == NULL is tolerated above
			 * (do_not_ref_stcb) — confirm callers on these
			 * platforms never pass a NULL stcb.
			 */
			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4560 
4561 
4562 
4563 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4564  *************ALTERNATE ROUTING CODE
4565  */
4566 
4567 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4568  *************ALTERNATE ROUTING CODE
4569  */
4570 
4571 struct mbuf *
4572 sctp_generate_invmanparam(int err)
4573 {
4574 	/* Return a MBUF with a invalid mandatory parameter */
4575 	struct mbuf *m;
4576 
4577 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4578 	if (m) {
4579 		struct sctp_paramhdr *ph;
4580 
4581 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4582 		ph = mtod(m, struct sctp_paramhdr *);
4583 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4584 		ph->param_type = htons(err);
4585 	}
4586 	return (m);
4587 }
4588 
4589 #ifdef SCTP_MBCNT_LOGGING
4590 void
4591 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4592     struct sctp_tmit_chunk *tp1, int chk_cnt)
4593 {
4594 	if (tp1->data == NULL) {
4595 		return;
4596 	}
4597 	asoc->chunks_on_out_queue -= chk_cnt;
4598 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4599 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4600 		    asoc->total_output_queue_size,
4601 		    tp1->book_size,
4602 		    0,
4603 		    tp1->mbcnt);
4604 	}
4605 	if (asoc->total_output_queue_size >= tp1->book_size) {
4606 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4607 	} else {
4608 		asoc->total_output_queue_size = 0;
4609 	}
4610 
4611 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4612 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4613 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4614 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4615 		} else {
4616 			stcb->sctp_socket->so_snd.sb_cc = 0;
4617 
4618 		}
4619 	}
4620 }
4621 
4622 #endif
4623 
4624 int
4625 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4626     int reason, int so_locked
4627 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4628     SCTP_UNUSED
4629 #endif
4630 )
4631 {
4632 	struct sctp_stream_out *strq;
4633 	struct sctp_tmit_chunk *chk = NULL;
4634 	struct sctp_stream_queue_pending *sp;
4635 	uint16_t stream = 0, seq = 0;
4636 	uint8_t foundeom = 0;
4637 	int ret_sz = 0;
4638 	int notdone;
4639 	int do_wakeup_routine = 0;
4640 
4641 	stream = tp1->rec.data.stream_number;
4642 	seq = tp1->rec.data.stream_seq;
4643 	do {
4644 		ret_sz += tp1->book_size;
4645 		if (tp1->data != NULL) {
4646 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4647 				sctp_flight_size_decrease(tp1);
4648 				sctp_total_flight_decrease(stcb, tp1);
4649 			}
4650 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4651 			stcb->asoc.peers_rwnd += tp1->send_size;
4652 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4653 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4654 			sctp_m_freem(tp1->data);
4655 			tp1->data = NULL;
4656 			do_wakeup_routine = 1;
4657 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4658 				stcb->asoc.sent_queue_cnt_removeable--;
4659 			}
4660 		}
4661 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4662 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4663 		    SCTP_DATA_NOT_FRAG) {
4664 			/* not frag'ed we ae done   */
4665 			notdone = 0;
4666 			foundeom = 1;
4667 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4668 			/* end of frag, we are done */
4669 			notdone = 0;
4670 			foundeom = 1;
4671 		} else {
4672 			/*
4673 			 * Its a begin or middle piece, we must mark all of
4674 			 * it
4675 			 */
4676 			notdone = 1;
4677 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4678 		}
4679 	} while (tp1 && notdone);
4680 	if (foundeom == 0) {
4681 		/*
4682 		 * The multi-part message was scattered across the send and
4683 		 * sent queue.
4684 		 */
4685 next_on_sent:
4686 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4687 		/*
4688 		 * recurse throught the send_queue too, starting at the
4689 		 * beginning.
4690 		 */
4691 		if ((tp1) &&
4692 		    (tp1->rec.data.stream_number == stream) &&
4693 		    (tp1->rec.data.stream_seq == seq)
4694 		    ) {
4695 			/*
4696 			 * save to chk in case we have some on stream out
4697 			 * queue. If so and we have an un-transmitted one we
4698 			 * don't have to fudge the TSN.
4699 			 */
4700 			chk = tp1;
4701 			ret_sz += tp1->book_size;
4702 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4703 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4704 			sctp_m_freem(tp1->data);
4705 			/* No flight involved here book the size to 0 */
4706 			tp1->book_size = 0;
4707 			tp1->data = NULL;
4708 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4709 				foundeom = 1;
4710 			}
4711 			do_wakeup_routine = 1;
4712 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4713 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4714 			/*
4715 			 * on to the sent queue so we can wait for it to be
4716 			 * passed by.
4717 			 */
4718 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4719 			    sctp_next);
4720 			stcb->asoc.send_queue_cnt--;
4721 			stcb->asoc.sent_queue_cnt++;
4722 			goto next_on_sent;
4723 		}
4724 	}
4725 	if (foundeom == 0) {
4726 		/*
4727 		 * Still no eom found. That means there is stuff left on the
4728 		 * stream out queue.. yuck.
4729 		 */
4730 		strq = &stcb->asoc.strmout[stream];
4731 		SCTP_TCB_SEND_LOCK(stcb);
4732 		sp = TAILQ_FIRST(&strq->outqueue);
4733 		while (sp->strseq <= seq) {
4734 			/* Check if its our SEQ */
4735 			if (sp->strseq == seq) {
4736 				sp->discard_rest = 1;
4737 				/*
4738 				 * We may need to put a chunk on the queue
4739 				 * that holds the TSN that would have been
4740 				 * sent with the LAST bit.
4741 				 */
4742 				if (chk == NULL) {
4743 					/* Yep, we have to */
4744 					sctp_alloc_a_chunk(stcb, chk);
4745 					if (chk == NULL) {
4746 						/*
4747 						 * we are hosed. All we can
4748 						 * do is nothing.. which
4749 						 * will cause an abort if
4750 						 * the peer is paying
4751 						 * attention.
4752 						 */
4753 						goto oh_well;
4754 					}
4755 					memset(chk, 0, sizeof(*chk));
4756 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4757 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4758 					chk->asoc = &stcb->asoc;
4759 					chk->rec.data.stream_seq = sp->strseq;
4760 					chk->rec.data.stream_number = sp->stream;
4761 					chk->rec.data.payloadtype = sp->ppid;
4762 					chk->rec.data.context = sp->context;
4763 					chk->flags = sp->act_flags;
4764 					chk->addr_over = sp->addr_over;
4765 					chk->whoTo = sp->net;
4766 					atomic_add_int(&chk->whoTo->ref_count, 1);
4767 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4768 					stcb->asoc.pr_sctp_cnt++;
4769 					chk->pr_sctp_on = 1;
4770 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4771 					stcb->asoc.sent_queue_cnt++;
4772 					stcb->asoc.pr_sctp_cnt++;
4773 				} else {
4774 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4775 				}
4776 		oh_well:
4777 				if (sp->data) {
4778 					/*
4779 					 * Pull any data to free up the SB
4780 					 * and allow sender to "add more"
4781 					 * whilc we will throw away :-)
4782 					 */
4783 					sctp_free_spbufspace(stcb, &stcb->asoc,
4784 					    sp);
4785 					ret_sz += sp->length;
4786 					do_wakeup_routine = 1;
4787 					sp->some_taken = 1;
4788 					sctp_m_freem(sp->data);
4789 					sp->length = 0;
4790 					sp->data = NULL;
4791 					sp->tail_mbuf = NULL;
4792 				}
4793 				break;
4794 			} else {
4795 				/* Next one please */
4796 				sp = TAILQ_NEXT(sp, next);
4797 			}
4798 		}		/* End while */
4799 		SCTP_TCB_SEND_UNLOCK(stcb);
4800 	}
4801 	if (do_wakeup_routine) {
4802 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4803 		struct socket *so;
4804 
4805 		so = SCTP_INP_SO(stcb->sctp_ep);
4806 		if (!so_locked) {
4807 			atomic_add_int(&stcb->asoc.refcnt, 1);
4808 			SCTP_TCB_UNLOCK(stcb);
4809 			SCTP_SOCKET_LOCK(so, 1);
4810 			SCTP_TCB_LOCK(stcb);
4811 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4812 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4813 				/* assoc was freed while we were unlocked */
4814 				SCTP_SOCKET_UNLOCK(so, 1);
4815 				return (ret_sz);
4816 			}
4817 		}
4818 #endif
4819 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4820 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4821 		if (!so_locked) {
4822 			SCTP_SOCKET_UNLOCK(so, 1);
4823 		}
4824 #endif
4825 	}
4826 	return (ret_sz);
4827 }
4828 
4829 /*
4830  * checks to see if the given address, sa, is one that is currently known by
4831  * the kernel note: can't distinguish the same address on multiple interfaces
4832  * and doesn't handle multiple addresses with different zone/scope id's note:
4833  * ifa_ifwithaddr() compares the entire sockaddr struct
4834  */
4835 struct sctp_ifa *
4836 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4837     int holds_lock)
4838 {
4839 	struct sctp_laddr *laddr;
4840 
4841 	if (holds_lock == 0) {
4842 		SCTP_INP_RLOCK(inp);
4843 	}
4844 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4845 		if (laddr->ifa == NULL)
4846 			continue;
4847 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4848 			continue;
4849 		if (addr->sa_family == AF_INET) {
4850 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4851 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4852 				/* found him. */
4853 				if (holds_lock == 0) {
4854 					SCTP_INP_RUNLOCK(inp);
4855 				}
4856 				return (laddr->ifa);
4857 				break;
4858 			}
4859 		}
4860 #ifdef INET6
4861 		if (addr->sa_family == AF_INET6) {
4862 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4863 			    &laddr->ifa->address.sin6)) {
4864 				/* found him. */
4865 				if (holds_lock == 0) {
4866 					SCTP_INP_RUNLOCK(inp);
4867 				}
4868 				return (laddr->ifa);
4869 				break;
4870 			}
4871 		}
4872 #endif
4873 	}
4874 	if (holds_lock == 0) {
4875 		SCTP_INP_RUNLOCK(inp);
4876 	}
4877 	return (NULL);
4878 }
4879 
/*
 * Hash a sockaddr (IPv4 or IPv6) into a 32-bit bucket value by folding
 * the address words together.  Unknown families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)addr;

		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
	} else if (addr->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
		uint32_t words[4];
		uint32_t hash_of_addr;

		/*
		 * Copy out the same 16 bytes s6_addr32[0..3] views; the
		 * memcpy avoids relying on the union member being visible.
		 */
		memcpy(words, &sin6->sin6_addr, sizeof(words));
		hash_of_addr = words[0] + words[1] + words[2] + words[3];
		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
		return (hash_of_addr);
	}
	return (0);
}
4902 
4903 struct sctp_ifa *
4904 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4905 {
4906 	struct sctp_ifa *sctp_ifap;
4907 	struct sctp_vrf *vrf;
4908 	struct sctp_ifalist *hash_head;
4909 	uint32_t hash_of_addr;
4910 
4911 	if (holds_lock == 0)
4912 		SCTP_IPI_ADDR_RLOCK();
4913 
4914 	vrf = sctp_find_vrf(vrf_id);
4915 	if (vrf == NULL) {
4916 stage_right:
4917 		if (holds_lock == 0)
4918 			SCTP_IPI_ADDR_RUNLOCK();
4919 		return (NULL);
4920 	}
4921 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4922 
4923 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4924 	if (hash_head == NULL) {
4925 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4926 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4927 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4928 		sctp_print_address(addr);
4929 		SCTP_PRINTF("No such bucket for address\n");
4930 		if (holds_lock == 0)
4931 			SCTP_IPI_ADDR_RUNLOCK();
4932 
4933 		return (NULL);
4934 	}
4935 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4936 		if (sctp_ifap == NULL) {
4937 #ifdef INVARIANTS
4938 			panic("Huh LIST_FOREACH corrupt");
4939 			goto stage_right;
4940 #else
4941 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4942 			goto stage_right;
4943 #endif
4944 		}
4945 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4946 			continue;
4947 		if (addr->sa_family == AF_INET) {
4948 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4949 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4950 				/* found him. */
4951 				if (holds_lock == 0)
4952 					SCTP_IPI_ADDR_RUNLOCK();
4953 				return (sctp_ifap);
4954 				break;
4955 			}
4956 		}
4957 #ifdef INET6
4958 		if (addr->sa_family == AF_INET6) {
4959 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4960 			    &sctp_ifap->address.sin6)) {
4961 				/* found him. */
4962 				if (holds_lock == 0)
4963 					SCTP_IPI_ADDR_RUNLOCK();
4964 				return (sctp_ifap);
4965 				break;
4966 			}
4967 		}
4968 #endif
4969 	}
4970 	if (holds_lock == 0)
4971 		SCTP_IPI_ADDR_RUNLOCK();
4972 	return (NULL);
4973 }
4974 
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	/*-
	 * Called after the receiver consumed data.  If the receive window
	 * has opened by at least rwnd_req since the last report, send a
	 * window-update SACK (or NR-SACK) right away; otherwise just bank
	 * the freed byte count on the tcb for next time.  *freed_so_far is
	 * consumed (zeroed) here.  hold_rlock says whether the caller holds
	 * the inp read-queue lock; it must be dropped before taking the
	 * TCB lock and is re-acquired on the way out.
	 */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed while we work. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Also pin the endpoint before dereferencing its socket. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	/* dif = how much the window grew since the last report (>= 0). */
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window grew enough to justify an immediate SACK. */
		if (hold_rlock) {
			/*
			 * Drop the read-queue lock before taking the TCB
			 * lock below (lock ordering).
			 */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: the assoc may be dying. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		/*
		 * EY if nr_sacks used then send an nr-sack , a sack
		 * otherwise
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
			sctp_send_nr_sack(stcb);
		else
			sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-acquire the read-queue lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Drop our association reference taken at entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5063 
5064 int
5065 sctp_sorecvmsg(struct socket *so,
5066     struct uio *uio,
5067     struct mbuf **mp,
5068     struct sockaddr *from,
5069     int fromlen,
5070     int *msg_flags,
5071     struct sctp_sndrcvinfo *sinfo,
5072     int filling_sinfo)
5073 {
5074 	/*
5075 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5076 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5077 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5078 	 * On the way out we may send out any combination of:
5079 	 * MSG_NOTIFICATION MSG_EOR
5080 	 *
5081 	 */
5082 	struct sctp_inpcb *inp = NULL;
5083 	int my_len = 0;
5084 	int cp_len = 0, error = 0;
5085 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5086 	struct mbuf *m = NULL, *embuf = NULL;
5087 	struct sctp_tcb *stcb = NULL;
5088 	int wakeup_read_socket = 0;
5089 	int freecnt_applied = 0;
5090 	int out_flags = 0, in_flags = 0;
5091 	int block_allowed = 1;
5092 	uint32_t freed_so_far = 0;
5093 	uint32_t copied_so_far = 0;
5094 	int in_eeor_mode = 0;
5095 	int no_rcv_needed = 0;
5096 	uint32_t rwnd_req = 0;
5097 	int hold_sblock = 0;
5098 	int hold_rlock = 0;
5099 	int slen = 0;
5100 	uint32_t held_length = 0;
5101 	int sockbuf_lock = 0;
5102 
5103 	if (uio == NULL) {
5104 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5105 		return (EINVAL);
5106 	}
5107 	if (msg_flags) {
5108 		in_flags = *msg_flags;
5109 		if (in_flags & MSG_PEEK)
5110 			SCTP_STAT_INCR(sctps_read_peeks);
5111 	} else {
5112 		in_flags = 0;
5113 	}
5114 	slen = uio->uio_resid;
5115 
5116 	/* Pull in and set up our int flags */
5117 	if (in_flags & MSG_OOB) {
5118 		/* Out of band's NOT supported */
5119 		return (EOPNOTSUPP);
5120 	}
5121 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5122 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5123 		return (EINVAL);
5124 	}
5125 	if ((in_flags & (MSG_DONTWAIT
5126 	    | MSG_NBIO
5127 	    )) ||
5128 	    SCTP_SO_IS_NBIO(so)) {
5129 		block_allowed = 0;
5130 	}
5131 	/* setup the endpoint */
5132 	inp = (struct sctp_inpcb *)so->so_pcb;
5133 	if (inp == NULL) {
5134 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5135 		return (EFAULT);
5136 	}
5137 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5138 	/* Must be at least a MTU's worth */
5139 	if (rwnd_req < SCTP_MIN_RWND)
5140 		rwnd_req = SCTP_MIN_RWND;
5141 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5142 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5143 		sctp_misc_ints(SCTP_SORECV_ENTER,
5144 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5145 	}
5146 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5147 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5148 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5149 	}
5150 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5151 	sockbuf_lock = 1;
5152 	if (error) {
5153 		goto release_unlocked;
5154 	}
5155 restart:
5156 
5157 
5158 restart_nosblocks:
5159 	if (hold_sblock == 0) {
5160 		SOCKBUF_LOCK(&so->so_rcv);
5161 		hold_sblock = 1;
5162 	}
5163 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5164 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5165 		goto out;
5166 	}
5167 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5168 		if (so->so_error) {
5169 			error = so->so_error;
5170 			if ((in_flags & MSG_PEEK) == 0)
5171 				so->so_error = 0;
5172 			goto out;
5173 		} else {
5174 			if (so->so_rcv.sb_cc == 0) {
5175 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5176 				/* indicate EOF */
5177 				error = 0;
5178 				goto out;
5179 			}
5180 		}
5181 	}
5182 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5183 		/* we need to wait for data */
5184 		if ((so->so_rcv.sb_cc == 0) &&
5185 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5186 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5187 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5188 				/*
5189 				 * For active open side clear flags for
5190 				 * re-use passive open is blocked by
5191 				 * connect.
5192 				 */
5193 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5194 					/*
5195 					 * You were aborted, passive side
5196 					 * always hits here
5197 					 */
5198 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5199 					error = ECONNRESET;
5200 					/*
5201 					 * You get this once if you are
5202 					 * active open side
5203 					 */
5204 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5205 						/*
5206 						 * Remove flag if on the
5207 						 * active open side
5208 						 */
5209 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5210 					}
5211 				}
5212 				so->so_state &= ~(SS_ISCONNECTING |
5213 				    SS_ISDISCONNECTING |
5214 				    SS_ISCONFIRMING |
5215 				    SS_ISCONNECTED);
5216 				if (error == 0) {
5217 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5218 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5219 						error = ENOTCONN;
5220 					} else {
5221 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5222 					}
5223 				}
5224 				goto out;
5225 			}
5226 		}
5227 		error = sbwait(&so->so_rcv);
5228 		if (error) {
5229 			goto out;
5230 		}
5231 		held_length = 0;
5232 		goto restart_nosblocks;
5233 	} else if (so->so_rcv.sb_cc == 0) {
5234 		if (so->so_error) {
5235 			error = so->so_error;
5236 			if ((in_flags & MSG_PEEK) == 0)
5237 				so->so_error = 0;
5238 		} else {
5239 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5240 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5241 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5242 					/*
5243 					 * For active open side clear flags
5244 					 * for re-use passive open is
5245 					 * blocked by connect.
5246 					 */
5247 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5248 						/*
5249 						 * You were aborted, passive
5250 						 * side always hits here
5251 						 */
5252 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5253 						error = ECONNRESET;
5254 						/*
5255 						 * You get this once if you
5256 						 * are active open side
5257 						 */
5258 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5259 							/*
5260 							 * Remove flag if on
5261 							 * the active open
5262 							 * side
5263 							 */
5264 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5265 						}
5266 					}
5267 					so->so_state &= ~(SS_ISCONNECTING |
5268 					    SS_ISDISCONNECTING |
5269 					    SS_ISCONFIRMING |
5270 					    SS_ISCONNECTED);
5271 					if (error == 0) {
5272 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5273 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5274 							error = ENOTCONN;
5275 						} else {
5276 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5277 						}
5278 					}
5279 					goto out;
5280 				}
5281 			}
5282 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5283 			error = EWOULDBLOCK;
5284 		}
5285 		goto out;
5286 	}
5287 	if (hold_sblock == 1) {
5288 		SOCKBUF_UNLOCK(&so->so_rcv);
5289 		hold_sblock = 0;
5290 	}
5291 	/* we possibly have data we can read */
5292 	/* sa_ignore FREED_MEMORY */
5293 	control = TAILQ_FIRST(&inp->read_queue);
5294 	if (control == NULL) {
5295 		/*
5296 		 * This could be happening since the appender did the
5297 		 * increment but as not yet did the tailq insert onto the
5298 		 * read_queue
5299 		 */
5300 		if (hold_rlock == 0) {
5301 			SCTP_INP_READ_LOCK(inp);
5302 			hold_rlock = 1;
5303 		}
5304 		control = TAILQ_FIRST(&inp->read_queue);
5305 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5306 #ifdef INVARIANTS
5307 			panic("Huh, its non zero and nothing on control?");
5308 #endif
5309 			so->so_rcv.sb_cc = 0;
5310 		}
5311 		SCTP_INP_READ_UNLOCK(inp);
5312 		hold_rlock = 0;
5313 		goto restart;
5314 	}
5315 	if ((control->length == 0) &&
5316 	    (control->do_not_ref_stcb)) {
5317 		/*
5318 		 * Clean up code for freeing assoc that left behind a
5319 		 * pdapi.. maybe a peer in EEOR that just closed after
5320 		 * sending and never indicated a EOR.
5321 		 */
5322 		if (hold_rlock == 0) {
5323 			hold_rlock = 1;
5324 			SCTP_INP_READ_LOCK(inp);
5325 		}
5326 		control->held_length = 0;
5327 		if (control->data) {
5328 			/* Hmm there is data here .. fix */
5329 			struct mbuf *m_tmp;
5330 			int cnt = 0;
5331 
5332 			m_tmp = control->data;
5333 			while (m_tmp) {
5334 				cnt += SCTP_BUF_LEN(m_tmp);
5335 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5336 					control->tail_mbuf = m_tmp;
5337 					control->end_added = 1;
5338 				}
5339 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5340 			}
5341 			control->length = cnt;
5342 		} else {
5343 			/* remove it */
5344 			TAILQ_REMOVE(&inp->read_queue, control, next);
5345 			/* Add back any hiddend data */
5346 			sctp_free_remote_addr(control->whoFrom);
5347 			sctp_free_a_readq(stcb, control);
5348 		}
5349 		if (hold_rlock) {
5350 			hold_rlock = 0;
5351 			SCTP_INP_READ_UNLOCK(inp);
5352 		}
5353 		goto restart;
5354 	}
5355 	if (control->length == 0) {
5356 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5357 		    (filling_sinfo)) {
5358 			/* find a more suitable one then this */
5359 			ctl = TAILQ_NEXT(control, next);
5360 			while (ctl) {
5361 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5362 				    (ctl->some_taken ||
5363 				    (ctl->spec_flags & M_NOTIFICATION) ||
5364 				    ((ctl->do_not_ref_stcb == 0) &&
5365 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5366 				    ) {
5367 					/*-
5368 					 * If we have a different TCB next, and there is data
5369 					 * present. If we have already taken some (pdapi), OR we can
5370 					 * ref the tcb and no delivery as started on this stream, we
5371 					 * take it. Note we allow a notification on a different
5372 					 * assoc to be delivered..
5373 					 */
5374 					control = ctl;
5375 					goto found_one;
5376 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5377 					    (ctl->length) &&
5378 					    ((ctl->some_taken) ||
5379 					    ((ctl->do_not_ref_stcb == 0) &&
5380 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5381 					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5382 				    ) {
5383 					/*-
5384 					 * If we have the same tcb, and there is data present, and we
5385 					 * have the strm interleave feature present. Then if we have
5386 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5387 					 * not started a delivery for this stream, we can take it.
5388 					 * Note we do NOT allow a notificaiton on the same assoc to
5389 					 * be delivered.
5390 					 */
5391 					control = ctl;
5392 					goto found_one;
5393 				}
5394 				ctl = TAILQ_NEXT(ctl, next);
5395 			}
5396 		}
5397 		/*
5398 		 * if we reach here, not suitable replacement is available
5399 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5400 		 * into the our held count, and its time to sleep again.
5401 		 */
5402 		held_length = so->so_rcv.sb_cc;
5403 		control->held_length = so->so_rcv.sb_cc;
5404 		goto restart;
5405 	}
5406 	/* Clear the held length since there is something to read */
5407 	control->held_length = 0;
5408 	if (hold_rlock) {
5409 		SCTP_INP_READ_UNLOCK(inp);
5410 		hold_rlock = 0;
5411 	}
5412 found_one:
5413 	/*
5414 	 * If we reach here, control has a some data for us to read off.
5415 	 * Note that stcb COULD be NULL.
5416 	 */
5417 	control->some_taken++;
5418 	if (hold_sblock) {
5419 		SOCKBUF_UNLOCK(&so->so_rcv);
5420 		hold_sblock = 0;
5421 	}
5422 	stcb = control->stcb;
5423 	if (stcb) {
5424 		if ((control->do_not_ref_stcb == 0) &&
5425 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5426 			if (freecnt_applied == 0)
5427 				stcb = NULL;
5428 		} else if (control->do_not_ref_stcb == 0) {
5429 			/* you can't free it on me please */
5430 			/*
5431 			 * The lock on the socket buffer protects us so the
5432 			 * free code will stop. But since we used the
5433 			 * socketbuf lock and the sender uses the tcb_lock
5434 			 * to increment, we need to use the atomic add to
5435 			 * the refcnt
5436 			 */
5437 			if (freecnt_applied) {
5438 #ifdef INVARIANTS
5439 				panic("refcnt already incremented");
5440 #else
5441 				printf("refcnt already incremented?\n");
5442 #endif
5443 			} else {
5444 				atomic_add_int(&stcb->asoc.refcnt, 1);
5445 				freecnt_applied = 1;
5446 			}
5447 			/*
5448 			 * Setup to remember how much we have not yet told
5449 			 * the peer our rwnd has opened up. Note we grab the
5450 			 * value from the tcb from last time. Note too that
5451 			 * sack sending clears this when a sack is sent,
5452 			 * which is fine. Once we hit the rwnd_req, we then
5453 			 * will go to the sctp_user_rcvd() that will not
5454 			 * lock until it KNOWs it MUST send a WUP-SACK.
5455 			 */
5456 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5457 			stcb->freed_by_sorcv_sincelast = 0;
5458 		}
5459 	}
5460 	if (stcb &&
5461 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5462 	    control->do_not_ref_stcb == 0) {
5463 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5464 	}
5465 	/* First lets get off the sinfo and sockaddr info */
5466 	if ((sinfo) && filling_sinfo) {
5467 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5468 		nxt = TAILQ_NEXT(control, next);
5469 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5470 			struct sctp_extrcvinfo *s_extra;
5471 
5472 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5473 			if ((nxt) &&
5474 			    (nxt->length)) {
5475 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5476 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5477 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5478 				}
5479 				if (nxt->spec_flags & M_NOTIFICATION) {
5480 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5481 				}
5482 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5483 				s_extra->sreinfo_next_length = nxt->length;
5484 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5485 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5486 				if (nxt->tail_mbuf != NULL) {
5487 					if (nxt->end_added) {
5488 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5489 					}
5490 				}
5491 			} else {
5492 				/*
5493 				 * we explicitly 0 this, since the memcpy
5494 				 * got some other things beyond the older
5495 				 * sinfo_ that is on the control's structure
5496 				 * :-D
5497 				 */
5498 				nxt = NULL;
5499 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5500 				s_extra->sreinfo_next_aid = 0;
5501 				s_extra->sreinfo_next_length = 0;
5502 				s_extra->sreinfo_next_ppid = 0;
5503 				s_extra->sreinfo_next_stream = 0;
5504 			}
5505 		}
5506 		/*
5507 		 * update off the real current cum-ack, if we have an stcb.
5508 		 */
5509 		if ((control->do_not_ref_stcb == 0) && stcb)
5510 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5511 		/*
5512 		 * mask off the high bits, we keep the actual chunk bits in
5513 		 * there.
5514 		 */
5515 		sinfo->sinfo_flags &= 0x00ff;
5516 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5517 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5518 		}
5519 	}
5520 #ifdef SCTP_ASOCLOG_OF_TSNS
5521 	{
5522 		int index, newindex;
5523 		struct sctp_pcbtsn_rlog *entry;
5524 
5525 		do {
5526 			index = inp->readlog_index;
5527 			newindex = index + 1;
5528 			if (newindex >= SCTP_READ_LOG_SIZE) {
5529 				newindex = 0;
5530 			}
5531 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5532 		entry = &inp->readlog[index];
5533 		entry->vtag = control->sinfo_assoc_id;
5534 		entry->strm = control->sinfo_stream;
5535 		entry->seq = control->sinfo_ssn;
5536 		entry->sz = control->length;
5537 		entry->flgs = control->sinfo_flags;
5538 	}
5539 #endif
5540 	if (fromlen && from) {
5541 		struct sockaddr *to;
5542 
5543 #ifdef INET
5544 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5545 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5546 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5547 #else
5548 		/* No AF_INET use AF_INET6 */
5549 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5550 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5551 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5552 #endif
5553 
5554 		to = from;
5555 #if defined(INET) && defined(INET6)
5556 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5557 		    (to->sa_family == AF_INET) &&
5558 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5559 			struct sockaddr_in *sin;
5560 			struct sockaddr_in6 sin6;
5561 
5562 			sin = (struct sockaddr_in *)to;
5563 			bzero(&sin6, sizeof(sin6));
5564 			sin6.sin6_family = AF_INET6;
5565 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5566 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5567 			bcopy(&sin->sin_addr,
5568 			    &sin6.sin6_addr.s6_addr32[3],
5569 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5570 			sin6.sin6_port = sin->sin_port;
5571 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5572 		}
5573 #endif
5574 #if defined(INET6)
5575 		{
5576 			struct sockaddr_in6 lsa6, *to6;
5577 
5578 			to6 = (struct sockaddr_in6 *)to;
5579 			sctp_recover_scope_mac(to6, (&lsa6));
5580 		}
5581 #endif
5582 	}
5583 	/* now copy out what data we can */
5584 	if (mp == NULL) {
5585 		/* copy out each mbuf in the chain up to length */
5586 get_more_data:
5587 		m = control->data;
5588 		while (m) {
5589 			/* Move out all we can */
5590 			cp_len = (int)uio->uio_resid;
5591 			my_len = (int)SCTP_BUF_LEN(m);
5592 			if (cp_len > my_len) {
5593 				/* not enough in this buf */
5594 				cp_len = my_len;
5595 			}
5596 			if (hold_rlock) {
5597 				SCTP_INP_READ_UNLOCK(inp);
5598 				hold_rlock = 0;
5599 			}
5600 			if (cp_len > 0)
5601 				error = uiomove(mtod(m, char *), cp_len, uio);
5602 			/* re-read */
5603 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5604 				goto release;
5605 			}
5606 			if ((control->do_not_ref_stcb == 0) && stcb &&
5607 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5608 				no_rcv_needed = 1;
5609 			}
5610 			if (error) {
5611 				/* error we are out of here */
5612 				goto release;
5613 			}
5614 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5615 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5616 			    ((control->end_added == 0) ||
5617 			    (control->end_added &&
5618 			    (TAILQ_NEXT(control, next) == NULL)))
5619 			    ) {
5620 				SCTP_INP_READ_LOCK(inp);
5621 				hold_rlock = 1;
5622 			}
5623 			if (cp_len == SCTP_BUF_LEN(m)) {
5624 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5625 				    (control->end_added)) {
5626 					out_flags |= MSG_EOR;
5627 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5628 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5629 				}
5630 				if (control->spec_flags & M_NOTIFICATION) {
5631 					out_flags |= MSG_NOTIFICATION;
5632 				}
5633 				/* we ate up the mbuf */
5634 				if (in_flags & MSG_PEEK) {
5635 					/* just looking */
5636 					m = SCTP_BUF_NEXT(m);
5637 					copied_so_far += cp_len;
5638 				} else {
5639 					/* dispose of the mbuf */
5640 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5641 						sctp_sblog(&so->so_rcv,
5642 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5643 					}
5644 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5645 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5646 						sctp_sblog(&so->so_rcv,
5647 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5648 					}
5649 					embuf = m;
5650 					copied_so_far += cp_len;
5651 					freed_so_far += cp_len;
5652 					freed_so_far += MSIZE;
5653 					atomic_subtract_int(&control->length, cp_len);
5654 					control->data = sctp_m_free(m);
5655 					m = control->data;
5656 					/*
5657 					 * been through it all, must hold sb
5658 					 * lock ok to null tail
5659 					 */
5660 					if (control->data == NULL) {
5661 #ifdef INVARIANTS
5662 						if ((control->end_added == 0) ||
5663 						    (TAILQ_NEXT(control, next) == NULL)) {
5664 							/*
5665 							 * If the end is not
5666 							 * added, OR the
5667 							 * next is NOT null
5668 							 * we MUST have the
5669 							 * lock.
5670 							 */
5671 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5672 								panic("Hmm we don't own the lock?");
5673 							}
5674 						}
5675 #endif
5676 						control->tail_mbuf = NULL;
5677 #ifdef INVARIANTS
5678 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5679 							panic("end_added, nothing left and no MSG_EOR");
5680 						}
5681 #endif
5682 					}
5683 				}
5684 			} else {
5685 				/* Do we need to trim the mbuf? */
5686 				if (control->spec_flags & M_NOTIFICATION) {
5687 					out_flags |= MSG_NOTIFICATION;
5688 				}
5689 				if ((in_flags & MSG_PEEK) == 0) {
5690 					SCTP_BUF_RESV_UF(m, cp_len);
5691 					SCTP_BUF_LEN(m) -= cp_len;
5692 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5693 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5694 					}
5695 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5696 					if ((control->do_not_ref_stcb == 0) &&
5697 					    stcb) {
5698 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5699 					}
5700 					copied_so_far += cp_len;
5701 					embuf = m;
5702 					freed_so_far += cp_len;
5703 					freed_so_far += MSIZE;
5704 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5705 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5706 						    SCTP_LOG_SBRESULT, 0);
5707 					}
5708 					atomic_subtract_int(&control->length, cp_len);
5709 				} else {
5710 					copied_so_far += cp_len;
5711 				}
5712 			}
5713 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5714 				break;
5715 			}
5716 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5717 			    (control->do_not_ref_stcb == 0) &&
5718 			    (freed_so_far >= rwnd_req)) {
5719 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5720 			}
5721 		}		/* end while(m) */
5722 		/*
5723 		 * At this point we have looked at it all and we either have
5724 		 * a MSG_EOR/or read all the user wants... <OR>
5725 		 * control->length == 0.
5726 		 */
5727 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5728 			/* we are done with this control */
5729 			if (control->length == 0) {
5730 				if (control->data) {
5731 #ifdef INVARIANTS
5732 					panic("control->data not null at read eor?");
5733 #else
5734 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5735 					sctp_m_freem(control->data);
5736 					control->data = NULL;
5737 #endif
5738 				}
5739 		done_with_control:
5740 				if (TAILQ_NEXT(control, next) == NULL) {
5741 					/*
5742 					 * If we don't have a next we need a
5743 					 * lock, if there is a next
5744 					 * interrupt is filling ahead of us
5745 					 * and we don't need a lock to
5746 					 * remove this guy (which is the
5747 					 * head of the queue).
5748 					 */
5749 					if (hold_rlock == 0) {
5750 						SCTP_INP_READ_LOCK(inp);
5751 						hold_rlock = 1;
5752 					}
5753 				}
5754 				TAILQ_REMOVE(&inp->read_queue, control, next);
5755 				/* Add back any hiddend data */
5756 				if (control->held_length) {
5757 					held_length = 0;
5758 					control->held_length = 0;
5759 					wakeup_read_socket = 1;
5760 				}
5761 				if (control->aux_data) {
5762 					sctp_m_free(control->aux_data);
5763 					control->aux_data = NULL;
5764 				}
5765 				no_rcv_needed = control->do_not_ref_stcb;
5766 				sctp_free_remote_addr(control->whoFrom);
5767 				control->data = NULL;
5768 				sctp_free_a_readq(stcb, control);
5769 				control = NULL;
5770 				if ((freed_so_far >= rwnd_req) &&
5771 				    (no_rcv_needed == 0))
5772 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5773 
5774 			} else {
5775 				/*
5776 				 * The user did not read all of this
5777 				 * message, turn off the returned MSG_EOR
5778 				 * since we are leaving more behind on the
5779 				 * control to read.
5780 				 */
5781 #ifdef INVARIANTS
5782 				if (control->end_added &&
5783 				    (control->data == NULL) &&
5784 				    (control->tail_mbuf == NULL)) {
5785 					panic("Gak, control->length is corrupt?");
5786 				}
5787 #endif
5788 				no_rcv_needed = control->do_not_ref_stcb;
5789 				out_flags &= ~MSG_EOR;
5790 			}
5791 		}
5792 		if (out_flags & MSG_EOR) {
5793 			goto release;
5794 		}
5795 		if ((uio->uio_resid == 0) ||
5796 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5797 		    ) {
5798 			goto release;
5799 		}
5800 		/*
5801 		 * If I hit here the receiver wants more and this message is
5802 		 * NOT done (pd-api). So two questions. Can we block? if not
5803 		 * we are done. Did the user NOT set MSG_WAITALL?
5804 		 */
5805 		if (block_allowed == 0) {
5806 			goto release;
5807 		}
5808 		/*
5809 		 * We need to wait for more data a few things: - We don't
5810 		 * sbunlock() so we don't get someone else reading. - We
5811 		 * must be sure to account for the case where what is added
5812 		 * is NOT to our control when we wakeup.
5813 		 */
5814 
5815 		/*
5816 		 * Do we need to tell the transport a rwnd update might be
5817 		 * needed before we go to sleep?
5818 		 */
5819 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5820 		    ((freed_so_far >= rwnd_req) &&
5821 		    (control->do_not_ref_stcb == 0) &&
5822 		    (no_rcv_needed == 0))) {
5823 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5824 		}
5825 wait_some_more:
5826 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5827 			goto release;
5828 		}
5829 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5830 			goto release;
5831 
5832 		if (hold_rlock == 1) {
5833 			SCTP_INP_READ_UNLOCK(inp);
5834 			hold_rlock = 0;
5835 		}
5836 		if (hold_sblock == 0) {
5837 			SOCKBUF_LOCK(&so->so_rcv);
5838 			hold_sblock = 1;
5839 		}
5840 		if ((copied_so_far) && (control->length == 0) &&
5841 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
5842 		    ) {
5843 			goto release;
5844 		}
5845 		if (so->so_rcv.sb_cc <= control->held_length) {
5846 			error = sbwait(&so->so_rcv);
5847 			if (error) {
5848 				goto release;
5849 			}
5850 			control->held_length = 0;
5851 		}
5852 		if (hold_sblock) {
5853 			SOCKBUF_UNLOCK(&so->so_rcv);
5854 			hold_sblock = 0;
5855 		}
5856 		if (control->length == 0) {
5857 			/* still nothing here */
5858 			if (control->end_added == 1) {
5859 				/* he aborted, or is done i.e.did a shutdown */
5860 				out_flags |= MSG_EOR;
5861 				if (control->pdapi_aborted) {
5862 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5863 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5864 
5865 					out_flags |= MSG_TRUNC;
5866 				} else {
5867 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5868 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5869 				}
5870 				goto done_with_control;
5871 			}
5872 			if (so->so_rcv.sb_cc > held_length) {
5873 				control->held_length = so->so_rcv.sb_cc;
5874 				held_length = 0;
5875 			}
5876 			goto wait_some_more;
5877 		} else if (control->data == NULL) {
5878 			/*
5879 			 * we must re-sync since data is probably being
5880 			 * added
5881 			 */
5882 			SCTP_INP_READ_LOCK(inp);
5883 			if ((control->length > 0) && (control->data == NULL)) {
5884 				/*
5885 				 * big trouble.. we have the lock and its
5886 				 * corrupt?
5887 				 */
5888 #ifdef INVARIANTS
5889 				panic("Impossible data==NULL length !=0");
5890 #endif
5891 				out_flags |= MSG_EOR;
5892 				out_flags |= MSG_TRUNC;
5893 				control->length = 0;
5894 				SCTP_INP_READ_UNLOCK(inp);
5895 				goto done_with_control;
5896 			}
5897 			SCTP_INP_READ_UNLOCK(inp);
5898 			/* We will fall around to get more data */
5899 		}
5900 		goto get_more_data;
5901 	} else {
5902 		/*-
5903 		 * Give caller back the mbuf chain,
5904 		 * store in uio_resid the length
5905 		 */
5906 		wakeup_read_socket = 0;
5907 		if ((control->end_added == 0) ||
5908 		    (TAILQ_NEXT(control, next) == NULL)) {
5909 			/* Need to get rlock */
5910 			if (hold_rlock == 0) {
5911 				SCTP_INP_READ_LOCK(inp);
5912 				hold_rlock = 1;
5913 			}
5914 		}
5915 		if (control->end_added) {
5916 			out_flags |= MSG_EOR;
5917 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5918 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5919 		}
5920 		if (control->spec_flags & M_NOTIFICATION) {
5921 			out_flags |= MSG_NOTIFICATION;
5922 		}
5923 		uio->uio_resid = control->length;
5924 		*mp = control->data;
5925 		m = control->data;
5926 		while (m) {
5927 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5928 				sctp_sblog(&so->so_rcv,
5929 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5930 			}
5931 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5932 			freed_so_far += SCTP_BUF_LEN(m);
5933 			freed_so_far += MSIZE;
5934 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5935 				sctp_sblog(&so->so_rcv,
5936 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5937 			}
5938 			m = SCTP_BUF_NEXT(m);
5939 		}
5940 		control->data = control->tail_mbuf = NULL;
5941 		control->length = 0;
5942 		if (out_flags & MSG_EOR) {
5943 			/* Done with this control */
5944 			goto done_with_control;
5945 		}
5946 	}
5947 release:
5948 	if (hold_rlock == 1) {
5949 		SCTP_INP_READ_UNLOCK(inp);
5950 		hold_rlock = 0;
5951 	}
5952 	if (hold_sblock == 1) {
5953 		SOCKBUF_UNLOCK(&so->so_rcv);
5954 		hold_sblock = 0;
5955 	}
5956 	sbunlock(&so->so_rcv);
5957 	sockbuf_lock = 0;
5958 
5959 release_unlocked:
5960 	if (hold_sblock) {
5961 		SOCKBUF_UNLOCK(&so->so_rcv);
5962 		hold_sblock = 0;
5963 	}
5964 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5965 		if ((freed_so_far >= rwnd_req) &&
5966 		    (control && (control->do_not_ref_stcb == 0)) &&
5967 		    (no_rcv_needed == 0))
5968 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5969 	}
5970 out:
5971 	if (msg_flags) {
5972 		*msg_flags = out_flags;
5973 	}
5974 	if (((out_flags & MSG_EOR) == 0) &&
5975 	    ((in_flags & MSG_PEEK) == 0) &&
5976 	    (sinfo) &&
5977 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
5978 		struct sctp_extrcvinfo *s_extra;
5979 
5980 		s_extra = (struct sctp_extrcvinfo *)sinfo;
5981 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5982 	}
5983 	if (hold_rlock == 1) {
5984 		SCTP_INP_READ_UNLOCK(inp);
5985 		hold_rlock = 0;
5986 	}
5987 	if (hold_sblock) {
5988 		SOCKBUF_UNLOCK(&so->so_rcv);
5989 		hold_sblock = 0;
5990 	}
5991 	if (sockbuf_lock) {
5992 		sbunlock(&so->so_rcv);
5993 	}
5994 	if (freecnt_applied) {
5995 		/*
5996 		 * The lock on the socket buffer protects us so the free
5997 		 * code will stop. But since we used the socketbuf lock and
5998 		 * the sender uses the tcb_lock to increment, we need to use
5999 		 * the atomic add to the refcnt.
6000 		 */
6001 		if (stcb == NULL) {
6002 #ifdef INVARIANTS
6003 			panic("stcb for refcnt has gone NULL?");
6004 			goto stage_left;
6005 #else
6006 			goto stage_left;
6007 #endif
6008 		}
6009 		atomic_add_int(&stcb->asoc.refcnt, -1);
6010 		freecnt_applied = 0;
6011 		/* Save the value back for next time */
6012 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6013 	}
6014 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6015 		if (stcb) {
6016 			sctp_misc_ints(SCTP_SORECV_DONE,
6017 			    freed_so_far,
6018 			    ((uio) ? (slen - uio->uio_resid) : slen),
6019 			    stcb->asoc.my_rwnd,
6020 			    so->so_rcv.sb_cc);
6021 		} else {
6022 			sctp_misc_ints(SCTP_SORECV_DONE,
6023 			    freed_so_far,
6024 			    ((uio) ? (slen - uio->uio_resid) : slen),
6025 			    0,
6026 			    so->so_rcv.sb_cc);
6027 		}
6028 	}
6029 stage_left:
6030 	if (wakeup_read_socket) {
6031 		sctp_sorwakeup(inp, so);
6032 	}
6033 	return (error);
6034 }
6035 
6036 
6037 #ifdef SCTP_MBUF_LOGGING
6038 struct mbuf *
6039 sctp_m_free(struct mbuf *m)
6040 {
6041 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6042 		if (SCTP_BUF_IS_EXTENDED(m)) {
6043 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6044 		}
6045 	}
6046 	return (m_free(m));
6047 }
6048 
6049 void
6050 sctp_m_freem(struct mbuf *mb)
6051 {
6052 	while (mb != NULL)
6053 		mb = sctp_m_free(mb);
6054 }
6055 
6056 #endif
6057 
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 *
	 * The actual work is deferred: we queue a SCTP_SET_PRIM_ADDR entry
	 * on the address work queue and kick its timer; the iterator
	 * processes it asynchronously.  Returns 0 on success or an errno.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	/* The address must belong to a local interface in this VRF. */
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* hold a reference on the ifa for the queued work entry */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_IPI_ITERATOR_WQ_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	/* kick the address work-queue timer so the entry gets processed */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
	return (0);
}
6104 
6105 
6106 int
6107 sctp_soreceive(struct socket *so,
6108     struct sockaddr **psa,
6109     struct uio *uio,
6110     struct mbuf **mp0,
6111     struct mbuf **controlp,
6112     int *flagsp)
6113 {
6114 	int error, fromlen;
6115 	uint8_t sockbuf[256];
6116 	struct sockaddr *from;
6117 	struct sctp_extrcvinfo sinfo;
6118 	int filling_sinfo = 1;
6119 	struct sctp_inpcb *inp;
6120 
6121 	inp = (struct sctp_inpcb *)so->so_pcb;
6122 	/* pickup the assoc we are reading from */
6123 	if (inp == NULL) {
6124 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6125 		return (EINVAL);
6126 	}
6127 	if ((sctp_is_feature_off(inp,
6128 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6129 	    (controlp == NULL)) {
6130 		/* user does not want the sndrcv ctl */
6131 		filling_sinfo = 0;
6132 	}
6133 	if (psa) {
6134 		from = (struct sockaddr *)sockbuf;
6135 		fromlen = sizeof(sockbuf);
6136 		from->sa_len = 0;
6137 	} else {
6138 		from = NULL;
6139 		fromlen = 0;
6140 	}
6141 
6142 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6143 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6144 	if ((controlp) && (filling_sinfo)) {
6145 		/* copy back the sinfo in a CMSG format */
6146 		if (filling_sinfo)
6147 			*controlp = sctp_build_ctl_nchunk(inp,
6148 			    (struct sctp_sndrcvinfo *)&sinfo);
6149 		else
6150 			*controlp = NULL;
6151 	}
6152 	if (psa) {
6153 		/* copy back the address info */
6154 		if (from && from->sa_len) {
6155 			*psa = sodupsockaddr(from, M_NOWAIT);
6156 		} else {
6157 			*psa = NULL;
6158 		}
6159 	}
6160 	return (error);
6161 }
6162 
6163 
6164 int
6165 sctp_l_soreceive(struct socket *so,
6166     struct sockaddr **name,
6167     struct uio *uio,
6168     char **controlp,
6169     int *controllen,
6170     int *flag)
6171 {
6172 	int error, fromlen;
6173 	uint8_t sockbuf[256];
6174 	struct sockaddr *from;
6175 	struct sctp_extrcvinfo sinfo;
6176 	int filling_sinfo = 1;
6177 	struct sctp_inpcb *inp;
6178 
6179 	inp = (struct sctp_inpcb *)so->so_pcb;
6180 	/* pickup the assoc we are reading from */
6181 	if (inp == NULL) {
6182 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6183 		return (EINVAL);
6184 	}
6185 	if ((sctp_is_feature_off(inp,
6186 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6187 	    (controlp == NULL)) {
6188 		/* user does not want the sndrcv ctl */
6189 		filling_sinfo = 0;
6190 	}
6191 	if (name) {
6192 		from = (struct sockaddr *)sockbuf;
6193 		fromlen = sizeof(sockbuf);
6194 		from->sa_len = 0;
6195 	} else {
6196 		from = NULL;
6197 		fromlen = 0;
6198 	}
6199 
6200 	error = sctp_sorecvmsg(so, uio,
6201 	    (struct mbuf **)NULL,
6202 	    from, fromlen, flag,
6203 	    (struct sctp_sndrcvinfo *)&sinfo,
6204 	    filling_sinfo);
6205 	if ((controlp) && (filling_sinfo)) {
6206 		/*
6207 		 * copy back the sinfo in a CMSG format note that the caller
6208 		 * has reponsibility for freeing the memory.
6209 		 */
6210 		if (filling_sinfo)
6211 			*controlp = sctp_build_ctl_cchunk(inp,
6212 			    controllen,
6213 			    (struct sctp_sndrcvinfo *)&sinfo);
6214 	}
6215 	if (name) {
6216 		/* copy back the address info */
6217 		if (from && from->sa_len) {
6218 			*name = sodupsockaddr(from, M_WAIT);
6219 		} else {
6220 			*name = NULL;
6221 		}
6222 	}
6223 	return (error);
6224 }
6225 
6226 
6227 
6228 
6229 
6230 
6231 
6232 int
6233 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6234     int totaddr, int *error)
6235 {
6236 	int added = 0;
6237 	int i;
6238 	struct sctp_inpcb *inp;
6239 	struct sockaddr *sa;
6240 	size_t incr = 0;
6241 
6242 	sa = addr;
6243 	inp = stcb->sctp_ep;
6244 	*error = 0;
6245 	for (i = 0; i < totaddr; i++) {
6246 		if (sa->sa_family == AF_INET) {
6247 			incr = sizeof(struct sockaddr_in);
6248 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6249 				/* assoc gone no un-lock */
6250 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6251 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
6252 				*error = ENOBUFS;
6253 				goto out_now;
6254 			}
6255 			added++;
6256 		} else if (sa->sa_family == AF_INET6) {
6257 			incr = sizeof(struct sockaddr_in6);
6258 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6259 				/* assoc gone no un-lock */
6260 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6261 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
6262 				*error = ENOBUFS;
6263 				goto out_now;
6264 			}
6265 			added++;
6266 		}
6267 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6268 	}
6269 out_now:
6270 	return (added);
6271 }
6272 
6273 struct sctp_tcb *
6274 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6275     int *totaddr, int *num_v4, int *num_v6, int *error,
6276     int limit, int *bad_addr)
6277 {
6278 	struct sockaddr *sa;
6279 	struct sctp_tcb *stcb = NULL;
6280 	size_t incr, at, i;
6281 
6282 	at = incr = 0;
6283 	sa = addr;
6284 	*error = *num_v6 = *num_v4 = 0;
6285 	/* account and validate addresses */
6286 	for (i = 0; i < (size_t)*totaddr; i++) {
6287 		if (sa->sa_family == AF_INET) {
6288 			(*num_v4) += 1;
6289 			incr = sizeof(struct sockaddr_in);
6290 			if (sa->sa_len != incr) {
6291 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6292 				*error = EINVAL;
6293 				*bad_addr = 1;
6294 				return (NULL);
6295 			}
6296 		} else if (sa->sa_family == AF_INET6) {
6297 			struct sockaddr_in6 *sin6;
6298 
6299 			sin6 = (struct sockaddr_in6 *)sa;
6300 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6301 				/* Must be non-mapped for connectx */
6302 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6303 				*error = EINVAL;
6304 				*bad_addr = 1;
6305 				return (NULL);
6306 			}
6307 			(*num_v6) += 1;
6308 			incr = sizeof(struct sockaddr_in6);
6309 			if (sa->sa_len != incr) {
6310 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6311 				*error = EINVAL;
6312 				*bad_addr = 1;
6313 				return (NULL);
6314 			}
6315 		} else {
6316 			*totaddr = i;
6317 			/* we are done */
6318 			break;
6319 		}
6320 		SCTP_INP_INCR_REF(inp);
6321 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6322 		if (stcb != NULL) {
6323 			/* Already have or am bring up an association */
6324 			return (stcb);
6325 		} else {
6326 			SCTP_INP_DECR_REF(inp);
6327 		}
6328 		if ((at + incr) > (size_t)limit) {
6329 			*totaddr = i;
6330 			break;
6331 		}
6332 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6333 	}
6334 	return ((struct sctp_tcb *)NULL);
6335 }
6336 
6337 /*
6338  * sctp_bindx(ADD) for one address.
6339  * assumes all arguments are valid/checked by caller.
6340  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* length must match a sockaddr_in6 exactly */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the v4-mapped address to plain v4 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* first address on an unbound endpoint: do a regular bind */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* is some other endpoint already bound to this addr/port? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* address is free; add it to this endpoint */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* another endpoint owns this address/port pair */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6462 
6463 /*
6464  * sctp_bindx(DELETE) for one address.
6465  * assumes all arguments are valid/checked by caller.
6466  */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* length must match a sockaddr_in6 exactly */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the v4-mapped address to plain v4 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6545 
6546 /*
6547  * returns the valid local address count for an assoc, taking into account
6548  * all scoping rules
6549  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	ipv4_addr_legal = ipv6_addr_legal = 0;
	/* a PF_INET6 endpoint may also use v4 unless it is v6-only */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				/* skip addresses restricted for this assoc */
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6681 
6682 #if defined(SCTP_LOCAL_TRACE_BUF)
6683 
6684 void
6685 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6686 {
6687 	uint32_t saveindex, newindex;
6688 
6689 	do {
6690 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6691 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6692 			newindex = 1;
6693 		} else {
6694 			newindex = saveindex + 1;
6695 		}
6696 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6697 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6698 		saveindex = 0;
6699 	}
6700 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6701 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6702 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6703 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6704 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6705 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6706 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6707 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6708 }
6709 
6710 #endif
6711 /* We will need to add support
6712  * to bind the ports and such here
6713  * so we can do UDP tunneling. In
6714  * the mean-time, we return error
6715  */
6716 #include <netinet/udp.h>
6717 #include <netinet/udp_var.h>
6718 #include <sys/proc.h>
6719 #ifdef INET6
6720 #include <netinet6/sctp6_var.h>
6721 #endif
6722 
6723 static void
6724 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6725 {
6726 	struct ip *iph;
6727 	struct mbuf *sp, *last;
6728 	struct udphdr *uhdr;
6729 	uint16_t port = 0, len;
6730 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6731 
6732 	/*
6733 	 * Split out the mbuf chain. Leave the IP header in m, place the
6734 	 * rest in the sp.
6735 	 */
6736 	if ((m->m_flags & M_PKTHDR) == 0) {
6737 		/* Can't handle one that is not a pkt hdr */
6738 		goto out;
6739 	}
6740 	/* pull the src port */
6741 	iph = mtod(m, struct ip *);
6742 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6743 
6744 	port = uhdr->uh_sport;
6745 	sp = m_split(m, off, M_DONTWAIT);
6746 	if (sp == NULL) {
6747 		/* Gak, drop packet, we can't do a split */
6748 		goto out;
6749 	}
6750 	if (sp->m_pkthdr.len < header_size) {
6751 		/* Gak, packet can't have an SCTP header in it - to small */
6752 		m_freem(sp);
6753 		goto out;
6754 	}
6755 	/* ok now pull up the UDP header and SCTP header together */
6756 	sp = m_pullup(sp, header_size);
6757 	if (sp == NULL) {
6758 		/* Gak pullup failed */
6759 		goto out;
6760 	}
6761 	/* trim out the UDP header */
6762 	m_adj(sp, sizeof(struct udphdr));
6763 
6764 	/* Now reconstruct the mbuf chain */
6765 	/* 1) find last one */
6766 	last = m;
6767 	while (last->m_next != NULL) {
6768 		last = last->m_next;
6769 	}
6770 	last->m_next = sp;
6771 	m->m_pkthdr.len += sp->m_pkthdr.len;
6772 	last = m;
6773 	while (last != NULL) {
6774 		last = last->m_next;
6775 	}
6776 	/* Now its ready for sctp_input or sctp6_input */
6777 	iph = mtod(m, struct ip *);
6778 	switch (iph->ip_v) {
6779 	case IPVERSION:
6780 		{
6781 			/* its IPv4 */
6782 			len = SCTP_GET_IPV4_LENGTH(iph);
6783 			len -= sizeof(struct udphdr);
6784 			SCTP_GET_IPV4_LENGTH(iph) = len;
6785 			sctp_input_with_port(m, off, port);
6786 			break;
6787 		}
6788 #ifdef INET6
6789 	case IPV6_VERSION >> 4:
6790 		{
6791 			/* its IPv6 - NOT supported */
6792 			goto out;
6793 			break;
6794 
6795 		}
6796 #endif
6797 	default:
6798 		{
6799 			m_freem(m);
6800 			break;
6801 		}
6802 	}
6803 	return;
6804 out:
6805 	m_freem(m);
6806 }
6807 
6808 void
6809 sctp_over_udp_stop(void)
6810 {
6811 	struct socket *sop;
6812 
6813 	/*
6814 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6815 	 * for writting!
6816 	 */
6817 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6818 		/* Nothing to do */
6819 		return;
6820 	}
6821 	sop = SCTP_BASE_INFO(udp_tun_socket);
6822 	soclose(sop);
6823 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6824 }
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * Create and bind the kernel UDP socket used for SCTP-over-UDP
	 * tunneling, and hook it up to sctp_recv_udp_tunneled_packet().
	 * Returns 0 on success or an errno.
	 *
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	/* create the socket with the current thread's credentials */
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up, we can't get the port */
exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_udp_tunneled_packet().
	 */
	return (0);
}
6878