xref: /freebsd/sys/netinet/sctputil.c (revision c4eb8f475a00040ffa2e99fdce5b56bbfc2cd00d)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_cc_functions.h>
51 #include <netinet/sctp_bsd_addr.h>
52 
53 #define NUMBER_OF_MTU_SIZES 18
54 
55 
56 #if defined(__Windows__) && !defined(SCTP_LOCAL_TRACE_BUF)
57 #include "eventrace_netinet.h"
58 #include "sctputil.tmh"		/* this is the file that will be auto
59 				 * generated */
60 #else
61 #ifndef KTR_SCTP
62 #define KTR_SCTP KTR_SUBSYS
63 #endif
64 #endif
65 
66 void
67 sctp_sblog(struct sockbuf *sb,
68     struct sctp_tcb *stcb, int from, int incr)
69 {
70 	struct sctp_cwnd_log sctp_clog;
71 
72 	sctp_clog.x.sb.stcb = stcb;
73 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
74 	if (stcb)
75 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
76 	else
77 		sctp_clog.x.sb.stcb_sbcc = 0;
78 	sctp_clog.x.sb.incr = incr;
79 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
80 	    SCTP_LOG_EVENT_SB,
81 	    from,
82 	    sctp_clog.x.misc.log1,
83 	    sctp_clog.x.misc.log2,
84 	    sctp_clog.x.misc.log3,
85 	    sctp_clog.x.misc.log4);
86 }
87 
88 void
89 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
90 {
91 	struct sctp_cwnd_log sctp_clog;
92 
93 	sctp_clog.x.close.inp = (void *)inp;
94 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
95 	if (stcb) {
96 		sctp_clog.x.close.stcb = (void *)stcb;
97 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
98 	} else {
99 		sctp_clog.x.close.stcb = 0;
100 		sctp_clog.x.close.state = 0;
101 	}
102 	sctp_clog.x.close.loc = loc;
103 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
104 	    SCTP_LOG_EVENT_CLOSE,
105 	    0,
106 	    sctp_clog.x.misc.log1,
107 	    sctp_clog.x.misc.log2,
108 	    sctp_clog.x.misc.log3,
109 	    sctp_clog.x.misc.log4);
110 }
111 
112 
113 void
114 rto_logging(struct sctp_nets *net, int from)
115 {
116 	struct sctp_cwnd_log sctp_clog;
117 
118 	memset(&sctp_clog, 0, sizeof(sctp_clog));
119 	sctp_clog.x.rto.net = (void *)net;
120 	sctp_clog.x.rto.rtt = net->prev_rtt;
121 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
122 	    SCTP_LOG_EVENT_RTT,
123 	    from,
124 	    sctp_clog.x.misc.log1,
125 	    sctp_clog.x.misc.log2,
126 	    sctp_clog.x.misc.log3,
127 	    sctp_clog.x.misc.log4);
128 
129 }
130 
131 void
132 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
133 {
134 	struct sctp_cwnd_log sctp_clog;
135 
136 	sctp_clog.x.strlog.stcb = stcb;
137 	sctp_clog.x.strlog.n_tsn = tsn;
138 	sctp_clog.x.strlog.n_sseq = sseq;
139 	sctp_clog.x.strlog.e_tsn = 0;
140 	sctp_clog.x.strlog.e_sseq = 0;
141 	sctp_clog.x.strlog.strm = stream;
142 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
143 	    SCTP_LOG_EVENT_STRM,
144 	    from,
145 	    sctp_clog.x.misc.log1,
146 	    sctp_clog.x.misc.log2,
147 	    sctp_clog.x.misc.log3,
148 	    sctp_clog.x.misc.log4);
149 
150 }
151 
152 void
153 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
154 {
155 	struct sctp_cwnd_log sctp_clog;
156 
157 	sctp_clog.x.nagle.stcb = (void *)stcb;
158 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
159 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
160 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
161 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
162 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
163 	    SCTP_LOG_EVENT_NAGLE,
164 	    action,
165 	    sctp_clog.x.misc.log1,
166 	    sctp_clog.x.misc.log2,
167 	    sctp_clog.x.misc.log3,
168 	    sctp_clog.x.misc.log4);
169 }
170 
171 
172 void
173 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
174 {
175 	struct sctp_cwnd_log sctp_clog;
176 
177 	sctp_clog.x.sack.cumack = cumack;
178 	sctp_clog.x.sack.oldcumack = old_cumack;
179 	sctp_clog.x.sack.tsn = tsn;
180 	sctp_clog.x.sack.numGaps = gaps;
181 	sctp_clog.x.sack.numDups = dups;
182 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
183 	    SCTP_LOG_EVENT_SACK,
184 	    from,
185 	    sctp_clog.x.misc.log1,
186 	    sctp_clog.x.misc.log2,
187 	    sctp_clog.x.misc.log3,
188 	    sctp_clog.x.misc.log4);
189 }
190 
191 void
192 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
193 {
194 	struct sctp_cwnd_log sctp_clog;
195 
196 	memset(&sctp_clog, 0, sizeof(sctp_clog));
197 	sctp_clog.x.map.base = map;
198 	sctp_clog.x.map.cum = cum;
199 	sctp_clog.x.map.high = high;
200 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
201 	    SCTP_LOG_EVENT_MAP,
202 	    from,
203 	    sctp_clog.x.misc.log1,
204 	    sctp_clog.x.misc.log2,
205 	    sctp_clog.x.misc.log3,
206 	    sctp_clog.x.misc.log4);
207 }
208 
209 void
210 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
211     int from)
212 {
213 	struct sctp_cwnd_log sctp_clog;
214 
215 	memset(&sctp_clog, 0, sizeof(sctp_clog));
216 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
217 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
218 	sctp_clog.x.fr.tsn = tsn;
219 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
220 	    SCTP_LOG_EVENT_FR,
221 	    from,
222 	    sctp_clog.x.misc.log1,
223 	    sctp_clog.x.misc.log2,
224 	    sctp_clog.x.misc.log3,
225 	    sctp_clog.x.misc.log4);
226 
227 }
228 
229 
230 void
231 sctp_log_mb(struct mbuf *m, int from)
232 {
233 	struct sctp_cwnd_log sctp_clog;
234 
235 	sctp_clog.x.mb.mp = m;
236 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
237 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
238 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
239 	if (SCTP_BUF_IS_EXTENDED(m)) {
240 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
241 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
242 	} else {
243 		sctp_clog.x.mb.ext = 0;
244 		sctp_clog.x.mb.refcnt = 0;
245 	}
246 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
247 	    SCTP_LOG_EVENT_MBUF,
248 	    from,
249 	    sctp_clog.x.misc.log1,
250 	    sctp_clog.x.misc.log2,
251 	    sctp_clog.x.misc.log3,
252 	    sctp_clog.x.misc.log4);
253 }
254 
255 
256 void
257 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
258     int from)
259 {
260 	struct sctp_cwnd_log sctp_clog;
261 
262 	if (control == NULL) {
263 		SCTP_PRINTF("Gak log of NULL?\n");
264 		return;
265 	}
266 	sctp_clog.x.strlog.stcb = control->stcb;
267 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
268 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
269 	sctp_clog.x.strlog.strm = control->sinfo_stream;
270 	if (poschk != NULL) {
271 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
272 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
273 	} else {
274 		sctp_clog.x.strlog.e_tsn = 0;
275 		sctp_clog.x.strlog.e_sseq = 0;
276 	}
277 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
278 	    SCTP_LOG_EVENT_STRM,
279 	    from,
280 	    sctp_clog.x.misc.log1,
281 	    sctp_clog.x.misc.log2,
282 	    sctp_clog.x.misc.log3,
283 	    sctp_clog.x.misc.log4);
284 
285 }
286 
287 void
288 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
289 {
290 	struct sctp_cwnd_log sctp_clog;
291 
292 	sctp_clog.x.cwnd.net = net;
293 	if (stcb->asoc.send_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_send = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
297 	if (stcb->asoc.stream_queue_cnt > 255)
298 		sctp_clog.x.cwnd.cnt_in_str = 255;
299 	else
300 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
301 
302 	if (net) {
303 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
304 		sctp_clog.x.cwnd.inflight = net->flight_size;
305 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
307 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
308 	}
309 	if (SCTP_CWNDLOG_PRESEND == from) {
310 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
311 	}
312 	sctp_clog.x.cwnd.cwnd_augment = augment;
313 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
314 	    SCTP_LOG_EVENT_CWND,
315 	    from,
316 	    sctp_clog.x.misc.log1,
317 	    sctp_clog.x.misc.log2,
318 	    sctp_clog.x.misc.log3,
319 	    sctp_clog.x.misc.log4);
320 
321 }
322 
323 void
324 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
325 {
326 	struct sctp_cwnd_log sctp_clog;
327 
328 	memset(&sctp_clog, 0, sizeof(sctp_clog));
329 	if (inp) {
330 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
331 
332 	} else {
333 		sctp_clog.x.lock.sock = (void *)NULL;
334 	}
335 	sctp_clog.x.lock.inp = (void *)inp;
336 	if (stcb) {
337 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
338 	} else {
339 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
340 	}
341 	if (inp) {
342 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
343 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
344 	} else {
345 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
346 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
347 	}
348 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
349 	if (inp->sctp_socket) {
350 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
351 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
352 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
353 	} else {
354 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
355 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
356 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
357 	}
358 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
359 	    SCTP_LOG_LOCK_EVENT,
360 	    from,
361 	    sctp_clog.x.misc.log1,
362 	    sctp_clog.x.misc.log2,
363 	    sctp_clog.x.misc.log3,
364 	    sctp_clog.x.misc.log4);
365 
366 }
367 
368 void
369 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
370 {
371 	struct sctp_cwnd_log sctp_clog;
372 
373 	memset(&sctp_clog, 0, sizeof(sctp_clog));
374 	sctp_clog.x.cwnd.net = net;
375 	sctp_clog.x.cwnd.cwnd_new_value = error;
376 	sctp_clog.x.cwnd.inflight = net->flight_size;
377 	sctp_clog.x.cwnd.cwnd_augment = burst;
378 	if (stcb->asoc.send_queue_cnt > 255)
379 		sctp_clog.x.cwnd.cnt_in_send = 255;
380 	else
381 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
382 	if (stcb->asoc.stream_queue_cnt > 255)
383 		sctp_clog.x.cwnd.cnt_in_str = 255;
384 	else
385 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
386 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
387 	    SCTP_LOG_EVENT_MAXBURST,
388 	    from,
389 	    sctp_clog.x.misc.log1,
390 	    sctp_clog.x.misc.log2,
391 	    sctp_clog.x.misc.log3,
392 	    sctp_clog.x.misc.log4);
393 
394 }
395 
396 void
397 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
398 {
399 	struct sctp_cwnd_log sctp_clog;
400 
401 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
402 	sctp_clog.x.rwnd.send_size = snd_size;
403 	sctp_clog.x.rwnd.overhead = overhead;
404 	sctp_clog.x.rwnd.new_rwnd = 0;
405 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
406 	    SCTP_LOG_EVENT_RWND,
407 	    from,
408 	    sctp_clog.x.misc.log1,
409 	    sctp_clog.x.misc.log2,
410 	    sctp_clog.x.misc.log3,
411 	    sctp_clog.x.misc.log4);
412 }
413 
414 void
415 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
416 {
417 	struct sctp_cwnd_log sctp_clog;
418 
419 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
420 	sctp_clog.x.rwnd.send_size = flight_size;
421 	sctp_clog.x.rwnd.overhead = overhead;
422 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
423 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
424 	    SCTP_LOG_EVENT_RWND,
425 	    from,
426 	    sctp_clog.x.misc.log1,
427 	    sctp_clog.x.misc.log2,
428 	    sctp_clog.x.misc.log3,
429 	    sctp_clog.x.misc.log4);
430 }
431 
432 void
433 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
434 {
435 	struct sctp_cwnd_log sctp_clog;
436 
437 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
438 	sctp_clog.x.mbcnt.size_change = book;
439 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
440 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
441 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
442 	    SCTP_LOG_EVENT_MBCNT,
443 	    from,
444 	    sctp_clog.x.misc.log1,
445 	    sctp_clog.x.misc.log2,
446 	    sctp_clog.x.misc.log3,
447 	    sctp_clog.x.misc.log4);
448 
449 }
450 
451 void
452 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
453 {
454 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
455 	    SCTP_LOG_MISC_EVENT,
456 	    from,
457 	    a, b, c, d);
458 }
459 
460 void
461 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
462 {
463 	struct sctp_cwnd_log sctp_clog;
464 
465 	sctp_clog.x.wake.stcb = (void *)stcb;
466 	sctp_clog.x.wake.wake_cnt = wake_cnt;
467 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
468 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
469 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
470 
471 	if (stcb->asoc.stream_queue_cnt < 0xff)
472 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
473 	else
474 		sctp_clog.x.wake.stream_qcnt = 0xff;
475 
476 	if (stcb->asoc.chunks_on_out_queue < 0xff)
477 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
478 	else
479 		sctp_clog.x.wake.chunks_on_oque = 0xff;
480 
481 	sctp_clog.x.wake.sctpflags = 0;
482 	/* set in the defered mode stuff */
483 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
484 		sctp_clog.x.wake.sctpflags |= 1;
485 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
486 		sctp_clog.x.wake.sctpflags |= 2;
487 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
488 		sctp_clog.x.wake.sctpflags |= 4;
489 	/* what about the sb */
490 	if (stcb->sctp_socket) {
491 		struct socket *so = stcb->sctp_socket;
492 
493 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
494 	} else {
495 		sctp_clog.x.wake.sbflags = 0xff;
496 	}
497 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
498 	    SCTP_LOG_EVENT_WAKE,
499 	    from,
500 	    sctp_clog.x.misc.log1,
501 	    sctp_clog.x.misc.log2,
502 	    sctp_clog.x.misc.log3,
503 	    sctp_clog.x.misc.log4);
504 
505 }
506 
507 void
508 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
509 {
510 	struct sctp_cwnd_log sctp_clog;
511 
512 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
513 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
514 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
515 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
516 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
517 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
518 	sctp_clog.x.blk.sndlen = sendlen;
519 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
520 	    SCTP_LOG_EVENT_BLOCK,
521 	    from,
522 	    sctp_clog.x.misc.log1,
523 	    sctp_clog.x.misc.log2,
524 	    sctp_clog.x.misc.log3,
525 	    sctp_clog.x.misc.log4);
526 
527 }
528 
529 int
530 sctp_fill_stat_log(void *optval, size_t *optsize)
531 {
532 	/* May need to fix this if ktrdump does not work */
533 	return (0);
534 }
535 
536 #ifdef SCTP_AUDITING_ENABLED
537 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
538 static int sctp_audit_indx = 0;
539 
540 static
541 void
542 sctp_print_audit_report(void)
543 {
544 	int i;
545 	int cnt;
546 
547 	cnt = 0;
548 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
549 		if ((sctp_audit_data[i][0] == 0xe0) &&
550 		    (sctp_audit_data[i][1] == 0x01)) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if (sctp_audit_data[i][0] == 0xf0) {
554 			cnt = 0;
555 			SCTP_PRINTF("\n");
556 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
557 		    (sctp_audit_data[i][1] == 0x01)) {
558 			SCTP_PRINTF("\n");
559 			cnt = 0;
560 		}
561 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
562 		    (uint32_t) sctp_audit_data[i][1]);
563 		cnt++;
564 		if ((cnt % 14) == 0)
565 			SCTP_PRINTF("\n");
566 	}
567 	for (i = 0; i < sctp_audit_indx; i++) {
568 		if ((sctp_audit_data[i][0] == 0xe0) &&
569 		    (sctp_audit_data[i][1] == 0x01)) {
570 			cnt = 0;
571 			SCTP_PRINTF("\n");
572 		} else if (sctp_audit_data[i][0] == 0xf0) {
573 			cnt = 0;
574 			SCTP_PRINTF("\n");
575 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
576 		    (sctp_audit_data[i][1] == 0x01)) {
577 			SCTP_PRINTF("\n");
578 			cnt = 0;
579 		}
580 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
581 		    (uint32_t) sctp_audit_data[i][1]);
582 		cnt++;
583 		if ((cnt % 14) == 0)
584 			SCTP_PRINTF("\n");
585 	}
586 	SCTP_PRINTF("\n");
587 }
588 
589 void
590 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
591     struct sctp_nets *net)
592 {
593 	int resend_cnt, tot_out, rep, tot_book_cnt;
594 	struct sctp_nets *lnet;
595 	struct sctp_tmit_chunk *chk;
596 
597 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
598 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
599 	sctp_audit_indx++;
600 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
601 		sctp_audit_indx = 0;
602 	}
603 	if (inp == NULL) {
604 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
605 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
606 		sctp_audit_indx++;
607 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
608 			sctp_audit_indx = 0;
609 		}
610 		return;
611 	}
612 	if (stcb == NULL) {
613 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
614 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
615 		sctp_audit_indx++;
616 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
617 			sctp_audit_indx = 0;
618 		}
619 		return;
620 	}
621 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
622 	sctp_audit_data[sctp_audit_indx][1] =
623 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
624 	sctp_audit_indx++;
625 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
626 		sctp_audit_indx = 0;
627 	}
628 	rep = 0;
629 	tot_book_cnt = 0;
630 	resend_cnt = tot_out = 0;
631 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
632 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
633 			resend_cnt++;
634 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
635 			tot_out += chk->book_size;
636 			tot_book_cnt++;
637 		}
638 	}
639 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
640 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
641 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
642 		sctp_audit_indx++;
643 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
644 			sctp_audit_indx = 0;
645 		}
646 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
647 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
648 		rep = 1;
649 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
650 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
651 		sctp_audit_data[sctp_audit_indx][1] =
652 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
653 		sctp_audit_indx++;
654 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
655 			sctp_audit_indx = 0;
656 		}
657 	}
658 	if (tot_out != stcb->asoc.total_flight) {
659 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
660 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
661 		sctp_audit_indx++;
662 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
663 			sctp_audit_indx = 0;
664 		}
665 		rep = 1;
666 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
667 		    (int)stcb->asoc.total_flight);
668 		stcb->asoc.total_flight = tot_out;
669 	}
670 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
671 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
672 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
673 		sctp_audit_indx++;
674 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
675 			sctp_audit_indx = 0;
676 		}
677 		rep = 1;
678 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
679 
680 		stcb->asoc.total_flight_count = tot_book_cnt;
681 	}
682 	tot_out = 0;
683 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
684 		tot_out += lnet->flight_size;
685 	}
686 	if (tot_out != stcb->asoc.total_flight) {
687 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
688 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
689 		sctp_audit_indx++;
690 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
691 			sctp_audit_indx = 0;
692 		}
693 		rep = 1;
694 		SCTP_PRINTF("real flight:%d net total was %d\n",
695 		    stcb->asoc.total_flight, tot_out);
696 		/* now corrective action */
697 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
698 
699 			tot_out = 0;
700 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
701 				if ((chk->whoTo == lnet) &&
702 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
703 					tot_out += chk->book_size;
704 				}
705 			}
706 			if (lnet->flight_size != tot_out) {
707 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
708 				    lnet, lnet->flight_size,
709 				    tot_out);
710 				lnet->flight_size = tot_out;
711 			}
712 		}
713 	}
714 	if (rep) {
715 		sctp_print_audit_report();
716 	}
717 }
718 
719 void
720 sctp_audit_log(uint8_t ev, uint8_t fd)
721 {
722 
723 	sctp_audit_data[sctp_audit_indx][0] = ev;
724 	sctp_audit_data[sctp_audit_indx][1] = fd;
725 	sctp_audit_indx++;
726 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
727 		sctp_audit_indx = 0;
728 	}
729 }
730 
731 #endif
732 
733 /*
734  * a list of sizes based on typical mtu's, used only if next hop size not
735  * returned.
736  */
737 static int sctp_mtu_sizes[] = {
738 	68,
739 	296,
740 	508,
741 	512,
742 	544,
743 	576,
744 	1006,
745 	1492,
746 	1500,
747 	1536,
748 	2002,
749 	2048,
750 	4352,
751 	4464,
752 	8166,
753 	17914,
754 	32000,
755 	65535
756 };
757 
758 void
759 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
760 {
761 	struct sctp_association *asoc;
762 	struct sctp_nets *net;
763 
764 	asoc = &stcb->asoc;
765 
766 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
767 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
768 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
769 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
770 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
771 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
772 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
773 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
774 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
775 	}
776 }
777 
778 int
779 find_next_best_mtu(int totsz)
780 {
781 	int i, perfer;
782 
783 	/*
784 	 * if we are in here we must find the next best fit based on the
785 	 * size of the dg that failed to be sent.
786 	 */
787 	perfer = 0;
788 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
789 		if (totsz < sctp_mtu_sizes[i]) {
790 			perfer = i - 1;
791 			if (perfer < 0)
792 				perfer = 0;
793 			break;
794 		}
795 	}
796 	return (sctp_mtu_sizes[perfer]);
797 }
798 
799 void
800 sctp_fill_random_store(struct sctp_pcb *m)
801 {
802 	/*
803 	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
804 	 * our counter. The result becomes our good random numbers and we
805 	 * then setup to give these out. Note that we do no locking to
806 	 * protect this. This is ok, since if competing folks call this we
807 	 * will get more gobbled gook in the random store which is what we
808 	 * want. There is a danger that two guys will use the same random
809 	 * numbers, but thats ok too since that is random as well :->
810 	 */
811 	m->store_at = 0;
812 	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
813 	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
814 	    sizeof(m->random_counter), (uint8_t *) m->random_store);
815 	m->random_counter++;
816 }
817 
818 uint32_t
819 sctp_select_initial_TSN(struct sctp_pcb *inp)
820 {
821 	/*
822 	 * A true implementation should use random selection process to get
823 	 * the initial stream sequence number, using RFC1750 as a good
824 	 * guideline
825 	 */
826 	uint32_t x, *xp;
827 	uint8_t *p;
828 	int store_at, new_store;
829 
830 	if (inp->initial_sequence_debug != 0) {
831 		uint32_t ret;
832 
833 		ret = inp->initial_sequence_debug;
834 		inp->initial_sequence_debug++;
835 		return (ret);
836 	}
837 retry:
838 	store_at = inp->store_at;
839 	new_store = store_at + sizeof(uint32_t);
840 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
841 		new_store = 0;
842 	}
843 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
844 		goto retry;
845 	}
846 	if (new_store == 0) {
847 		/* Refill the random store */
848 		sctp_fill_random_store(inp);
849 	}
850 	p = &inp->random_store[store_at];
851 	xp = (uint32_t *) p;
852 	x = *xp;
853 	return (x);
854 }
855 
856 uint32_t
857 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
858 {
859 	uint32_t x, not_done;
860 	struct timeval now;
861 
862 	(void)SCTP_GETTIME_TIMEVAL(&now);
863 	not_done = 1;
864 	while (not_done) {
865 		x = sctp_select_initial_TSN(&inp->sctp_ep);
866 		if (x == 0) {
867 			/* we never use 0 */
868 			continue;
869 		}
870 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
871 			not_done = 0;
872 		}
873 	}
874 	return (x);
875 }
876 
877 int
878 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
879     uint32_t override_tag, uint32_t vrf_id)
880 {
881 	struct sctp_association *asoc;
882 
883 	/*
884 	 * Anything set to zero is taken care of by the allocation routine's
885 	 * bzero
886 	 */
887 
888 	/*
889 	 * Up front select what scoping to apply on addresses I tell my peer
890 	 * Not sure what to do with these right now, we will need to come up
891 	 * with a way to set them. We may need to pass them through from the
892 	 * caller in the sctp_aloc_assoc() function.
893 	 */
894 	int i;
895 
896 	asoc = &stcb->asoc;
897 	/* init all variables to a known value. */
898 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
899 	asoc->max_burst = m->sctp_ep.max_burst;
900 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
901 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
902 	asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
903 	/* EY Init nr_sack variable */
904 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
905 	/* JRS 5/21/07 - Init CMT PF variables */
906 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
907 	asoc->sctp_frag_point = m->sctp_frag_point;
908 #ifdef INET
909 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
910 #else
911 	asoc->default_tos = 0;
912 #endif
913 
914 #ifdef INET6
915 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
916 #else
917 	asoc->default_flowlabel = 0;
918 #endif
919 	asoc->sb_send_resv = 0;
920 	if (override_tag) {
921 		asoc->my_vtag = override_tag;
922 	} else {
923 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
924 	}
925 	/* Get the nonce tags */
926 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
927 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
928 	asoc->vrf_id = vrf_id;
929 
930 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
931 		asoc->hb_is_disabled = 1;
932 	else
933 		asoc->hb_is_disabled = 0;
934 
935 #ifdef SCTP_ASOCLOG_OF_TSNS
936 	asoc->tsn_in_at = 0;
937 	asoc->tsn_out_at = 0;
938 	asoc->tsn_in_wrapped = 0;
939 	asoc->tsn_out_wrapped = 0;
940 	asoc->cumack_log_at = 0;
941 	asoc->cumack_log_atsnt = 0;
942 #endif
943 #ifdef SCTP_FS_SPEC_LOG
944 	asoc->fs_index = 0;
945 #endif
946 	asoc->refcnt = 0;
947 	asoc->assoc_up_sent = 0;
948 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
949 	    sctp_select_initial_TSN(&m->sctp_ep);
950 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
951 	/* we are optimisitic here */
952 	asoc->peer_supports_pktdrop = 1;
953 	asoc->peer_supports_nat = 0;
954 	asoc->sent_queue_retran_cnt = 0;
955 
956 	/* for CMT */
957 	asoc->last_net_cmt_send_started = NULL;
958 
959 	/* This will need to be adjusted */
960 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
961 	asoc->last_acked_seq = asoc->init_seq_number - 1;
962 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
963 	asoc->asconf_seq_in = asoc->last_acked_seq;
964 
965 	/* here we are different, we hold the next one we expect */
966 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
967 
968 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
969 	asoc->initial_rto = m->sctp_ep.initial_rto;
970 
971 	asoc->max_init_times = m->sctp_ep.max_init_times;
972 	asoc->max_send_times = m->sctp_ep.max_send_times;
973 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
974 	asoc->free_chunk_cnt = 0;
975 
976 	asoc->iam_blocking = 0;
977 	/* ECN Nonce initialization */
978 	asoc->context = m->sctp_context;
979 	asoc->def_send = m->def_send;
980 	asoc->ecn_nonce_allowed = 0;
981 	asoc->receiver_nonce_sum = 1;
982 	asoc->nonce_sum_expect_base = 1;
983 	asoc->nonce_sum_check = 1;
984 	asoc->nonce_resync_tsn = 0;
985 	asoc->nonce_wait_for_ecne = 0;
986 	asoc->nonce_wait_tsn = 0;
987 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
988 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
989 	asoc->pr_sctp_cnt = 0;
990 	asoc->total_output_queue_size = 0;
991 
992 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
993 		struct in6pcb *inp6;
994 
995 		/* Its a V6 socket */
996 		inp6 = (struct in6pcb *)m;
997 		asoc->ipv6_addr_legal = 1;
998 		/* Now look at the binding flag to see if V4 will be legal */
999 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1000 			asoc->ipv4_addr_legal = 1;
1001 		} else {
1002 			/* V4 addresses are NOT legal on the association */
1003 			asoc->ipv4_addr_legal = 0;
1004 		}
1005 	} else {
1006 		/* Its a V4 socket, no - V6 */
1007 		asoc->ipv4_addr_legal = 1;
1008 		asoc->ipv6_addr_legal = 0;
1009 	}
1010 
1011 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1012 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1013 
1014 	asoc->smallest_mtu = m->sctp_frag_point;
1015 #ifdef SCTP_PRINT_FOR_B_AND_M
1016 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1017 	    asoc->smallest_mtu);
1018 #endif
1019 	asoc->minrto = m->sctp_ep.sctp_minrto;
1020 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1021 
1022 	asoc->locked_on_sending = NULL;
1023 	asoc->stream_locked_on = 0;
1024 	asoc->ecn_echo_cnt_onq = 0;
1025 	asoc->stream_locked = 0;
1026 
1027 	asoc->send_sack = 1;
1028 
1029 	LIST_INIT(&asoc->sctp_restricted_addrs);
1030 
1031 	TAILQ_INIT(&asoc->nets);
1032 	TAILQ_INIT(&asoc->pending_reply_queue);
1033 	TAILQ_INIT(&asoc->asconf_ack_sent);
1034 	/* Setup to fill the hb random cache at first HB */
1035 	asoc->hb_random_idx = 4;
1036 
1037 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1038 
1039 	/*
1040 	 * JRS - Pick the default congestion control module based on the
1041 	 * sysctl.
1042 	 */
1043 	switch (m->sctp_ep.sctp_default_cc_module) {
1044 		/* JRS - Standard TCP congestion control */
1045 	case SCTP_CC_RFC2581:
1046 		{
1047 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1048 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1049 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1050 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1051 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1052 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1053 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1054 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1055 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1056 			break;
1057 		}
1058 		/* JRS - High Speed TCP congestion control (Floyd) */
1059 	case SCTP_CC_HSTCP:
1060 		{
1061 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1062 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1063 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1064 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1065 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1066 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1067 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1068 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1069 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1070 			break;
1071 		}
1072 		/* JRS - HTCP congestion control */
1073 	case SCTP_CC_HTCP:
1074 		{
1075 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1076 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1077 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1078 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1079 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1080 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1081 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1082 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1083 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1084 			break;
1085 		}
1086 		/* JRS - By default, use RFC2581 */
1087 	default:
1088 		{
1089 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1090 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1091 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1092 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1093 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1094 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1095 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1096 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1097 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1098 			break;
1099 		}
1100 	}
1101 
1102 	/*
1103 	 * Now the stream parameters, here we allocate space for all streams
1104 	 * that we request by default.
1105 	 */
1106 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1107 	    m->sctp_ep.pre_open_stream_count;
1108 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1109 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1110 	    SCTP_M_STRMO);
1111 	if (asoc->strmout == NULL) {
1112 		/* big trouble no memory */
1113 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1114 		return (ENOMEM);
1115 	}
1116 	for (i = 0; i < asoc->streamoutcnt; i++) {
1117 		/*
1118 		 * inbound side must be set to 0xffff, also NOTE when we get
1119 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1120 		 * count (streamoutcnt) but first check if we sent to any of
1121 		 * the upper streams that were dropped (if some were). Those
1122 		 * that were dropped must be notified to the upper layer as
1123 		 * failed to send.
1124 		 */
1125 		asoc->strmout[i].next_sequence_sent = 0x0;
1126 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1127 		asoc->strmout[i].stream_no = i;
1128 		asoc->strmout[i].last_msg_incomplete = 0;
1129 		asoc->strmout[i].next_spoke.tqe_next = 0;
1130 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1131 	}
1132 	/* Now the mapping array */
1133 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1134 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1135 	    SCTP_M_MAP);
1136 	if (asoc->mapping_array == NULL) {
1137 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1138 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1139 		return (ENOMEM);
1140 	}
1141 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1142 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1143 	    SCTP_M_MAP);
1144 	if (asoc->nr_mapping_array == NULL) {
1145 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1146 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1147 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1148 		return (ENOMEM);
1149 	}
1150 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1151 
1152 	/* Now the init of the other outqueues */
1153 	TAILQ_INIT(&asoc->free_chunks);
1154 	TAILQ_INIT(&asoc->out_wheel);
1155 	TAILQ_INIT(&asoc->control_send_queue);
1156 	TAILQ_INIT(&asoc->asconf_send_queue);
1157 	TAILQ_INIT(&asoc->send_queue);
1158 	TAILQ_INIT(&asoc->sent_queue);
1159 	TAILQ_INIT(&asoc->reasmqueue);
1160 	TAILQ_INIT(&asoc->resetHead);
1161 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1162 	TAILQ_INIT(&asoc->asconf_queue);
1163 	/* authentication fields */
1164 	asoc->authinfo.random = NULL;
1165 	asoc->authinfo.active_keyid = 0;
1166 	asoc->authinfo.assoc_key = NULL;
1167 	asoc->authinfo.assoc_keyid = 0;
1168 	asoc->authinfo.recv_key = NULL;
1169 	asoc->authinfo.recv_keyid = 0;
1170 	LIST_INIT(&asoc->shared_keys);
1171 	asoc->marked_retrans = 0;
1172 	asoc->timoinit = 0;
1173 	asoc->timodata = 0;
1174 	asoc->timosack = 0;
1175 	asoc->timoshutdown = 0;
1176 	asoc->timoheartbeat = 0;
1177 	asoc->timocookie = 0;
1178 	asoc->timoshutdownack = 0;
1179 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1180 	asoc->discontinuity_time = asoc->start_time;
1181 	/*
1182 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1183 	 * freed later when the association is freed.
1184 	 */
1185 	return (0);
1186 }
1187 
1188 void
1189 sctp_print_mapping_array(struct sctp_association *asoc)
1190 {
1191 	unsigned int i, limit;
1192 
1193 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1194 	    asoc->mapping_array_size,
1195 	    asoc->mapping_array_base_tsn,
1196 	    asoc->cumulative_tsn,
1197 	    asoc->highest_tsn_inside_map,
1198 	    asoc->highest_tsn_inside_nr_map);
1199 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1200 		if (asoc->mapping_array[limit - 1]) {
1201 			break;
1202 		}
1203 	}
1204 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1205 	for (i = 0; i < limit; i++) {
1206 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1207 		if (((i + 1) % 16) == 0)
1208 			printf("\n");
1209 	}
1210 	if (limit % 16)
1211 		printf("\n");
1212 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1213 		if (asoc->nr_mapping_array[limit - 1]) {
1214 			break;
1215 		}
1216 	}
1217 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1218 	for (i = 0; i < limit; i++) {
1219 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1220 	}
1221 	if (limit % 16)
1222 		printf("\n");
1223 }
1224 
1225 int
1226 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1227 {
1228 	/* mapping array needs to grow */
1229 	uint8_t *new_array1, *new_array2;
1230 	uint32_t new_size;
1231 
1232 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1233 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1234 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1235 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1236 		/* can't get more, forget it */
1237 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1238 		if (new_array1) {
1239 			SCTP_FREE(new_array1, SCTP_M_MAP);
1240 		}
1241 		if (new_array2) {
1242 			SCTP_FREE(new_array2, SCTP_M_MAP);
1243 		}
1244 		return (-1);
1245 	}
1246 	memset(new_array1, 0, new_size);
1247 	memset(new_array2, 0, new_size);
1248 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1249 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1250 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1251 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1252 	asoc->mapping_array = new_array1;
1253 	asoc->nr_mapping_array = new_array2;
1254 	asoc->mapping_array_size = new_size;
1255 	return (0);
1256 }
1257 
1258 
1259 static void
1260 sctp_iterator_work(struct sctp_iterator *it)
1261 {
1262 	int iteration_count = 0;
1263 	int inp_skip = 0;
1264 
1265 	SCTP_ITERATOR_LOCK();
1266 	if (it->inp) {
1267 		SCTP_INP_DECR_REF(it->inp);
1268 	}
1269 	if (it->inp == NULL) {
1270 		/* iterator is complete */
1271 done_with_iterator:
1272 		SCTP_ITERATOR_UNLOCK();
1273 		if (it->function_atend != NULL) {
1274 			(*it->function_atend) (it->pointer, it->val);
1275 		}
1276 		SCTP_FREE(it, SCTP_M_ITER);
1277 		return;
1278 	}
1279 select_a_new_ep:
1280 	SCTP_INP_RLOCK(it->inp);
1281 	while (((it->pcb_flags) &&
1282 	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1283 	    ((it->pcb_features) &&
1284 	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1285 		/* endpoint flags or features don't match, so keep looking */
1286 		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1287 			SCTP_INP_RUNLOCK(it->inp);
1288 			goto done_with_iterator;
1289 		}
1290 		SCTP_INP_RUNLOCK(it->inp);
1291 		it->inp = LIST_NEXT(it->inp, sctp_list);
1292 		if (it->inp == NULL) {
1293 			goto done_with_iterator;
1294 		}
1295 		SCTP_INP_RLOCK(it->inp);
1296 	}
1297 	/* now go through each assoc which is in the desired state */
1298 	if (it->done_current_ep == 0) {
1299 		if (it->function_inp != NULL)
1300 			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1301 		it->done_current_ep = 1;
1302 	}
1303 	if (it->stcb == NULL) {
1304 		/* run the per instance function */
1305 		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1306 	}
1307 	if ((inp_skip) || it->stcb == NULL) {
1308 		if (it->function_inp_end != NULL) {
1309 			inp_skip = (*it->function_inp_end) (it->inp,
1310 			    it->pointer,
1311 			    it->val);
1312 		}
1313 		SCTP_INP_RUNLOCK(it->inp);
1314 		goto no_stcb;
1315 	}
1316 	while (it->stcb) {
1317 		SCTP_TCB_LOCK(it->stcb);
1318 		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1319 			/* not in the right state... keep looking */
1320 			SCTP_TCB_UNLOCK(it->stcb);
1321 			goto next_assoc;
1322 		}
1323 		/* see if we have limited out the iterator loop */
1324 		iteration_count++;
1325 		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1326 			/* Pause to let others grab the lock */
1327 			atomic_add_int(&it->stcb->asoc.refcnt, 1);
1328 			SCTP_TCB_UNLOCK(it->stcb);
1329 			SCTP_INP_INCR_REF(it->inp);
1330 			SCTP_INP_RUNLOCK(it->inp);
1331 			SCTP_ITERATOR_UNLOCK();
1332 			SCTP_ITERATOR_LOCK();
1333 			if (sctp_it_ctl.iterator_flags) {
1334 				/* We won't be staying here */
1335 				SCTP_INP_DECR_REF(it->inp);
1336 				atomic_add_int(&it->stcb->asoc.refcnt, -1);
1337 				if (sctp_it_ctl.iterator_flags &
1338 				    SCTP_ITERATOR_MUST_EXIT) {
1339 					goto done_with_iterator;
1340 				}
1341 				if (sctp_it_ctl.iterator_flags &
1342 				    SCTP_ITERATOR_STOP_CUR_IT) {
1343 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
1344 					goto done_with_iterator;
1345 				}
1346 				if (sctp_it_ctl.iterator_flags &
1347 				    SCTP_ITERATOR_STOP_CUR_INP) {
1348 					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
1349 					goto no_stcb;
1350 				}
1351 				/* If we reach here huh? */
1352 				printf("Unknown it ctl flag %x\n",
1353 				    sctp_it_ctl.iterator_flags);
1354 				sctp_it_ctl.iterator_flags = 0;
1355 			}
1356 			SCTP_INP_RLOCK(it->inp);
1357 			SCTP_INP_DECR_REF(it->inp);
1358 			SCTP_TCB_LOCK(it->stcb);
1359 			atomic_add_int(&it->stcb->asoc.refcnt, -1);
1360 			iteration_count = 0;
1361 		}
1362 		/* run function on this one */
1363 		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1364 
1365 		/*
1366 		 * we lie here, it really needs to have its own type but
1367 		 * first I must verify that this won't effect things :-0
1368 		 */
1369 		if (it->no_chunk_output == 0)
1370 			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1371 
1372 		SCTP_TCB_UNLOCK(it->stcb);
1373 next_assoc:
1374 		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1375 		if (it->stcb == NULL) {
1376 			/* Run last function */
1377 			if (it->function_inp_end != NULL) {
1378 				inp_skip = (*it->function_inp_end) (it->inp,
1379 				    it->pointer,
1380 				    it->val);
1381 			}
1382 		}
1383 	}
1384 	SCTP_INP_RUNLOCK(it->inp);
1385 no_stcb:
1386 	/* done with all assocs on this endpoint, move on to next endpoint */
1387 	it->done_current_ep = 0;
1388 	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1389 		it->inp = NULL;
1390 	} else {
1391 		SCTP_INP_INFO_RLOCK();
1392 		it->inp = LIST_NEXT(it->inp, sctp_list);
1393 		SCTP_INP_INFO_RUNLOCK();
1394 	}
1395 	if (it->inp == NULL) {
1396 		goto done_with_iterator;
1397 	}
1398 	goto select_a_new_ep;
1399 }
1400 
1401 void
1402 sctp_iterator_worker(void)
1403 {
1404 	struct sctp_iterator *it = NULL;
1405 
1406 	/* This function is called with the WQ lock in place */
1407 
1408 	sctp_it_ctl.iterator_running = 1;
1409 	sctp_it_ctl.cur_it = it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead);
1410 	while (it) {
1411 		/* now lets work on this one */
1412 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1413 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1414 		CURVNET_SET(it->vn);
1415 		sctp_iterator_work(it);
1416 
1417 		CURVNET_RESTORE();
1418 		SCTP_IPI_ITERATOR_WQ_LOCK();
1419 		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
1420 			sctp_it_ctl.cur_it = NULL;
1421 			break;
1422 		}
1423 		/* sa_ignore FREED_MEMORY */
1424 		sctp_it_ctl.cur_it = it = TAILQ_FIRST(&sctp_it_ctl.iteratorhead);
1425 	}
1426 	sctp_it_ctl.iterator_running = 0;
1427 	return;
1428 }
1429 
1430 
1431 static void
1432 sctp_handle_addr_wq(void)
1433 {
1434 	/* deal with the ADDR wq from the rtsock calls */
1435 	struct sctp_laddr *wi;
1436 	struct sctp_asconf_iterator *asc;
1437 
1438 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1439 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1440 	if (asc == NULL) {
1441 		/* Try later, no memory */
1442 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1443 		    (struct sctp_inpcb *)NULL,
1444 		    (struct sctp_tcb *)NULL,
1445 		    (struct sctp_nets *)NULL);
1446 		return;
1447 	}
1448 	LIST_INIT(&asc->list_of_work);
1449 	asc->cnt = 0;
1450 
1451 	SCTP_WQ_ADDR_LOCK();
1452 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1453 	while (wi != NULL) {
1454 		LIST_REMOVE(wi, sctp_nxt_addr);
1455 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1456 		asc->cnt++;
1457 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1458 	}
1459 	SCTP_WQ_ADDR_UNLOCK();
1460 
1461 	if (asc->cnt == 0) {
1462 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1463 	} else {
1464 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1465 		    sctp_asconf_iterator_stcb,
1466 		    NULL,	/* No ep end for boundall */
1467 		    SCTP_PCB_FLAGS_BOUNDALL,
1468 		    SCTP_PCB_ANY_FEATURES,
1469 		    SCTP_ASOC_ANY_STATE,
1470 		    (void *)asc, 0,
1471 		    sctp_asconf_iterator_end, NULL, 0);
1472 	}
1473 }
1474 
1475 int retcode = 0;
1476 int cur_oerr = 0;
1477 
1478 void
1479 sctp_timeout_handler(void *t)
1480 {
1481 	struct sctp_inpcb *inp;
1482 	struct sctp_tcb *stcb;
1483 	struct sctp_nets *net;
1484 	struct sctp_timer *tmr;
1485 
1486 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1487 	struct socket *so;
1488 
1489 #endif
1490 	int did_output, type;
1491 
1492 	tmr = (struct sctp_timer *)t;
1493 	inp = (struct sctp_inpcb *)tmr->ep;
1494 	stcb = (struct sctp_tcb *)tmr->tcb;
1495 	net = (struct sctp_nets *)tmr->net;
1496 	CURVNET_SET((struct vnet *)tmr->vnet);
1497 	did_output = 1;
1498 
1499 #ifdef SCTP_AUDITING_ENABLED
1500 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1501 	sctp_auditing(3, inp, stcb, net);
1502 #endif
1503 
1504 	/* sanity checks... */
1505 	if (tmr->self != (void *)tmr) {
1506 		/*
1507 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1508 		 * tmr);
1509 		 */
1510 		CURVNET_RESTORE();
1511 		return;
1512 	}
1513 	tmr->stopped_from = 0xa001;
1514 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1515 		/*
1516 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1517 		 * tmr->type);
1518 		 */
1519 		CURVNET_RESTORE();
1520 		return;
1521 	}
1522 	tmr->stopped_from = 0xa002;
1523 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1524 		CURVNET_RESTORE();
1525 		return;
1526 	}
1527 	/* if this is an iterator timeout, get the struct and clear inp */
1528 	tmr->stopped_from = 0xa003;
1529 	type = tmr->type;
1530 	if (inp) {
1531 		SCTP_INP_INCR_REF(inp);
1532 		if ((inp->sctp_socket == 0) &&
1533 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1534 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1535 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1536 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1537 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1538 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1539 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1540 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1541 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1542 		    ) {
1543 			SCTP_INP_DECR_REF(inp);
1544 			CURVNET_RESTORE();
1545 			return;
1546 		}
1547 	}
1548 	tmr->stopped_from = 0xa004;
1549 	if (stcb) {
1550 		atomic_add_int(&stcb->asoc.refcnt, 1);
1551 		if (stcb->asoc.state == 0) {
1552 			atomic_add_int(&stcb->asoc.refcnt, -1);
1553 			if (inp) {
1554 				SCTP_INP_DECR_REF(inp);
1555 			}
1556 			CURVNET_RESTORE();
1557 			return;
1558 		}
1559 	}
1560 	tmr->stopped_from = 0xa005;
1561 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
1562 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1563 		if (inp) {
1564 			SCTP_INP_DECR_REF(inp);
1565 		}
1566 		if (stcb) {
1567 			atomic_add_int(&stcb->asoc.refcnt, -1);
1568 		}
1569 		CURVNET_RESTORE();
1570 		return;
1571 	}
1572 	tmr->stopped_from = 0xa006;
1573 
1574 	if (stcb) {
1575 		SCTP_TCB_LOCK(stcb);
1576 		atomic_add_int(&stcb->asoc.refcnt, -1);
1577 		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
1578 		    ((stcb->asoc.state == 0) ||
1579 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1580 			SCTP_TCB_UNLOCK(stcb);
1581 			if (inp) {
1582 				SCTP_INP_DECR_REF(inp);
1583 			}
1584 			CURVNET_RESTORE();
1585 			return;
1586 		}
1587 	}
1588 	/* record in stopped what t-o occured */
1589 	tmr->stopped_from = tmr->type;
1590 
1591 	/* mark as being serviced now */
1592 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1593 		/*
1594 		 * Callout has been rescheduled.
1595 		 */
1596 		goto get_out;
1597 	}
1598 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1599 		/*
1600 		 * Not active, so no action.
1601 		 */
1602 		goto get_out;
1603 	}
1604 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1605 
1606 	/* call the handler for the appropriate timer type */
1607 	switch (tmr->type) {
1608 	case SCTP_TIMER_TYPE_ZERO_COPY:
1609 		if (inp == NULL) {
1610 			break;
1611 		}
1612 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1613 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1614 		}
1615 		break;
1616 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1617 		if (inp == NULL) {
1618 			break;
1619 		}
1620 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1621 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1622 		}
1623 		break;
1624 	case SCTP_TIMER_TYPE_ADDR_WQ:
1625 		sctp_handle_addr_wq();
1626 		break;
1627 	case SCTP_TIMER_TYPE_SEND:
1628 		if ((stcb == NULL) || (inp == NULL)) {
1629 			break;
1630 		}
1631 		SCTP_STAT_INCR(sctps_timodata);
1632 		stcb->asoc.timodata++;
1633 		stcb->asoc.num_send_timers_up--;
1634 		if (stcb->asoc.num_send_timers_up < 0) {
1635 			stcb->asoc.num_send_timers_up = 0;
1636 		}
1637 		SCTP_TCB_LOCK_ASSERT(stcb);
1638 		cur_oerr = stcb->asoc.overall_error_count;
1639 		retcode = sctp_t3rxt_timer(inp, stcb, net);
1640 		if (retcode) {
1641 			/* no need to unlock on tcb its gone */
1642 
1643 			goto out_decr;
1644 		}
1645 		SCTP_TCB_LOCK_ASSERT(stcb);
1646 #ifdef SCTP_AUDITING_ENABLED
1647 		sctp_auditing(4, inp, stcb, net);
1648 #endif
1649 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1650 		if ((stcb->asoc.num_send_timers_up == 0) &&
1651 		    (stcb->asoc.sent_queue_cnt > 0)
1652 		    ) {
1653 			struct sctp_tmit_chunk *chk;
1654 
1655 			/*
1656 			 * safeguard. If there on some on the sent queue
1657 			 * somewhere but no timers running something is
1658 			 * wrong... so we start a timer on the first chunk
1659 			 * on the send queue on whatever net it is sent to.
1660 			 */
1661 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1662 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1663 			    chk->whoTo);
1664 		}
1665 		break;
1666 	case SCTP_TIMER_TYPE_INIT:
1667 		if ((stcb == NULL) || (inp == NULL)) {
1668 			break;
1669 		}
1670 		SCTP_STAT_INCR(sctps_timoinit);
1671 		stcb->asoc.timoinit++;
1672 		if (sctp_t1init_timer(inp, stcb, net)) {
1673 			/* no need to unlock on tcb its gone */
1674 			goto out_decr;
1675 		}
1676 		/* We do output but not here */
1677 		did_output = 0;
1678 		break;
1679 	case SCTP_TIMER_TYPE_RECV:
1680 		if ((stcb == NULL) || (inp == NULL)) {
1681 			break;
1682 		} {
1683 			SCTP_STAT_INCR(sctps_timosack);
1684 			stcb->asoc.timosack++;
1685 			sctp_send_sack(stcb);
1686 		}
1687 #ifdef SCTP_AUDITING_ENABLED
1688 		sctp_auditing(4, inp, stcb, net);
1689 #endif
1690 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1691 		break;
1692 	case SCTP_TIMER_TYPE_SHUTDOWN:
1693 		if ((stcb == NULL) || (inp == NULL)) {
1694 			break;
1695 		}
1696 		if (sctp_shutdown_timer(inp, stcb, net)) {
1697 			/* no need to unlock on tcb its gone */
1698 			goto out_decr;
1699 		}
1700 		SCTP_STAT_INCR(sctps_timoshutdown);
1701 		stcb->asoc.timoshutdown++;
1702 #ifdef SCTP_AUDITING_ENABLED
1703 		sctp_auditing(4, inp, stcb, net);
1704 #endif
1705 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1706 		break;
1707 	case SCTP_TIMER_TYPE_HEARTBEAT:
1708 		{
1709 			struct sctp_nets *lnet;
1710 			int cnt_of_unconf = 0;
1711 
1712 			if ((stcb == NULL) || (inp == NULL)) {
1713 				break;
1714 			}
1715 			SCTP_STAT_INCR(sctps_timoheartbeat);
1716 			stcb->asoc.timoheartbeat++;
1717 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1718 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1719 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1720 					cnt_of_unconf++;
1721 				}
1722 			}
1723 			if (cnt_of_unconf == 0) {
1724 				if (sctp_heartbeat_timer(inp, stcb, lnet,
1725 				    cnt_of_unconf)) {
1726 					/* no need to unlock on tcb its gone */
1727 					goto out_decr;
1728 				}
1729 			}
1730 #ifdef SCTP_AUDITING_ENABLED
1731 			sctp_auditing(4, inp, stcb, lnet);
1732 #endif
1733 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
1734 			    stcb->sctp_ep, stcb, lnet);
1735 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1736 		}
1737 		break;
1738 	case SCTP_TIMER_TYPE_COOKIE:
1739 		if ((stcb == NULL) || (inp == NULL)) {
1740 			break;
1741 		}
1742 		if (sctp_cookie_timer(inp, stcb, net)) {
1743 			/* no need to unlock on tcb its gone */
1744 			goto out_decr;
1745 		}
1746 		SCTP_STAT_INCR(sctps_timocookie);
1747 		stcb->asoc.timocookie++;
1748 #ifdef SCTP_AUDITING_ENABLED
1749 		sctp_auditing(4, inp, stcb, net);
1750 #endif
1751 		/*
1752 		 * We consider T3 and Cookie timer pretty much the same with
1753 		 * respect to where from in chunk_output.
1754 		 */
1755 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1756 		break;
1757 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1758 		{
1759 			struct timeval tv;
1760 			int i, secret;
1761 
1762 			if (inp == NULL) {
1763 				break;
1764 			}
1765 			SCTP_STAT_INCR(sctps_timosecret);
1766 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1767 			SCTP_INP_WLOCK(inp);
1768 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1769 			inp->sctp_ep.last_secret_number =
1770 			    inp->sctp_ep.current_secret_number;
1771 			inp->sctp_ep.current_secret_number++;
1772 			if (inp->sctp_ep.current_secret_number >=
1773 			    SCTP_HOW_MANY_SECRETS) {
1774 				inp->sctp_ep.current_secret_number = 0;
1775 			}
1776 			secret = (int)inp->sctp_ep.current_secret_number;
1777 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1778 				inp->sctp_ep.secret_key[secret][i] =
1779 				    sctp_select_initial_TSN(&inp->sctp_ep);
1780 			}
1781 			SCTP_INP_WUNLOCK(inp);
1782 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1783 		}
1784 		did_output = 0;
1785 		break;
1786 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1787 		if ((stcb == NULL) || (inp == NULL)) {
1788 			break;
1789 		}
1790 		SCTP_STAT_INCR(sctps_timopathmtu);
1791 		sctp_pathmtu_timer(inp, stcb, net);
1792 		did_output = 0;
1793 		break;
1794 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1795 		if ((stcb == NULL) || (inp == NULL)) {
1796 			break;
1797 		}
1798 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1799 			/* no need to unlock on tcb its gone */
1800 			goto out_decr;
1801 		}
1802 		SCTP_STAT_INCR(sctps_timoshutdownack);
1803 		stcb->asoc.timoshutdownack++;
1804 #ifdef SCTP_AUDITING_ENABLED
1805 		sctp_auditing(4, inp, stcb, net);
1806 #endif
1807 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1808 		break;
1809 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1810 		if ((stcb == NULL) || (inp == NULL)) {
1811 			break;
1812 		}
1813 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1814 		sctp_abort_an_association(inp, stcb,
1815 		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
1816 		/* no need to unlock on tcb its gone */
1817 		goto out_decr;
1818 
1819 	case SCTP_TIMER_TYPE_STRRESET:
1820 		if ((stcb == NULL) || (inp == NULL)) {
1821 			break;
1822 		}
1823 		if (sctp_strreset_timer(inp, stcb, net)) {
1824 			/* no need to unlock on tcb its gone */
1825 			goto out_decr;
1826 		}
1827 		SCTP_STAT_INCR(sctps_timostrmrst);
1828 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1829 		break;
1830 	case SCTP_TIMER_TYPE_EARLYFR:
1831 		/* Need to do FR of things for net */
1832 		if ((stcb == NULL) || (inp == NULL)) {
1833 			break;
1834 		}
1835 		SCTP_STAT_INCR(sctps_timoearlyfr);
1836 		sctp_early_fr_timer(inp, stcb, net);
1837 		break;
1838 	case SCTP_TIMER_TYPE_ASCONF:
1839 		if ((stcb == NULL) || (inp == NULL)) {
1840 			break;
1841 		}
1842 		if (sctp_asconf_timer(inp, stcb, net)) {
1843 			/* no need to unlock on tcb its gone */
1844 			goto out_decr;
1845 		}
1846 		SCTP_STAT_INCR(sctps_timoasconf);
1847 #ifdef SCTP_AUDITING_ENABLED
1848 		sctp_auditing(4, inp, stcb, net);
1849 #endif
1850 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1851 		break;
1852 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1853 		if ((stcb == NULL) || (inp == NULL)) {
1854 			break;
1855 		}
1856 		sctp_delete_prim_timer(inp, stcb, net);
1857 		SCTP_STAT_INCR(sctps_timodelprim);
1858 		break;
1859 
1860 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1861 		if ((stcb == NULL) || (inp == NULL)) {
1862 			break;
1863 		}
1864 		SCTP_STAT_INCR(sctps_timoautoclose);
1865 		sctp_autoclose_timer(inp, stcb, net);
1866 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1867 		did_output = 0;
1868 		break;
1869 	case SCTP_TIMER_TYPE_ASOCKILL:
1870 		if ((stcb == NULL) || (inp == NULL)) {
1871 			break;
1872 		}
1873 		SCTP_STAT_INCR(sctps_timoassockill);
1874 		/* Can we free it yet? */
1875 		SCTP_INP_DECR_REF(inp);
1876 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1877 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1878 		so = SCTP_INP_SO(inp);
1879 		atomic_add_int(&stcb->asoc.refcnt, 1);
1880 		SCTP_TCB_UNLOCK(stcb);
1881 		SCTP_SOCKET_LOCK(so, 1);
1882 		SCTP_TCB_LOCK(stcb);
1883 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1884 #endif
1885 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1886 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1887 		SCTP_SOCKET_UNLOCK(so, 1);
1888 #endif
1889 		/*
1890 		 * free asoc, always unlocks (or destroy's) so prevent
1891 		 * duplicate unlock or unlock of a free mtx :-0
1892 		 */
1893 		stcb = NULL;
1894 		goto out_no_decr;
1895 	case SCTP_TIMER_TYPE_INPKILL:
1896 		SCTP_STAT_INCR(sctps_timoinpkill);
1897 		if (inp == NULL) {
1898 			break;
1899 		}
1900 		/*
1901 		 * special case, take away our increment since WE are the
1902 		 * killer
1903 		 */
1904 		SCTP_INP_DECR_REF(inp);
1905 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1906 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1907 		    SCTP_CALLED_DIRECTLY_NOCMPSET);
1908 		inp = NULL;
1909 		goto out_no_decr;
1910 	default:
1911 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1912 		    tmr->type);
1913 		break;
1914 	};
1915 #ifdef SCTP_AUDITING_ENABLED
1916 	sctp_audit_log(0xF1, (uint8_t) tmr->type);
1917 	if (inp)
1918 		sctp_auditing(5, inp, stcb, net);
1919 #endif
1920 	if ((did_output) && stcb) {
1921 		/*
1922 		 * Now we need to clean up the control chunk chain if an
1923 		 * ECNE is on it. It must be marked as UNSENT again so next
1924 		 * call will continue to send it until such time that we get
1925 		 * a CWR, to remove it. It is, however, less likely that we
1926 		 * will find a ecn echo on the chain though.
1927 		 */
1928 		sctp_fix_ecn_echo(&stcb->asoc);
1929 	}
1930 get_out:
1931 	if (stcb) {
1932 		SCTP_TCB_UNLOCK(stcb);
1933 	}
1934 out_decr:
1935 	if (inp) {
1936 		SCTP_INP_DECR_REF(inp);
1937 	}
1938 out_no_decr:
1939 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
1940 	    type);
1941 	CURVNET_RESTORE();
1942 }
1943 
1944 void
1945 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1946     struct sctp_nets *net)
1947 {
1948 	int to_ticks;
1949 	struct sctp_timer *tmr;
1950 
1951 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1952 		return;
1953 
1954 	to_ticks = 0;
1955 
1956 	tmr = NULL;
1957 	if (stcb) {
1958 		SCTP_TCB_LOCK_ASSERT(stcb);
1959 	}
1960 	switch (t_type) {
1961 	case SCTP_TIMER_TYPE_ZERO_COPY:
1962 		tmr = &inp->sctp_ep.zero_copy_timer;
1963 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1964 		break;
1965 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1966 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1967 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1968 		break;
1969 	case SCTP_TIMER_TYPE_ADDR_WQ:
1970 		/* Only 1 tick away :-) */
1971 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1972 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1973 		break;
1974 	case SCTP_TIMER_TYPE_SEND:
1975 		/* Here we use the RTO timer */
1976 		{
1977 			int rto_val;
1978 
1979 			if ((stcb == NULL) || (net == NULL)) {
1980 				return;
1981 			}
1982 			tmr = &net->rxt_timer;
1983 			if (net->RTO == 0) {
1984 				rto_val = stcb->asoc.initial_rto;
1985 			} else {
1986 				rto_val = net->RTO;
1987 			}
1988 			to_ticks = MSEC_TO_TICKS(rto_val);
1989 		}
1990 		break;
1991 	case SCTP_TIMER_TYPE_INIT:
1992 		/*
1993 		 * Here we use the INIT timer default usually about 1
1994 		 * minute.
1995 		 */
1996 		if ((stcb == NULL) || (net == NULL)) {
1997 			return;
1998 		}
1999 		tmr = &net->rxt_timer;
2000 		if (net->RTO == 0) {
2001 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2002 		} else {
2003 			to_ticks = MSEC_TO_TICKS(net->RTO);
2004 		}
2005 		break;
2006 	case SCTP_TIMER_TYPE_RECV:
2007 		/*
2008 		 * Here we use the Delayed-Ack timer value from the inp
2009 		 * ususually about 200ms.
2010 		 */
2011 		if (stcb == NULL) {
2012 			return;
2013 		}
2014 		tmr = &stcb->asoc.dack_timer;
2015 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
2016 		break;
2017 	case SCTP_TIMER_TYPE_SHUTDOWN:
2018 		/* Here we use the RTO of the destination. */
2019 		if ((stcb == NULL) || (net == NULL)) {
2020 			return;
2021 		}
2022 		if (net->RTO == 0) {
2023 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2024 		} else {
2025 			to_ticks = MSEC_TO_TICKS(net->RTO);
2026 		}
2027 		tmr = &net->rxt_timer;
2028 		break;
2029 	case SCTP_TIMER_TYPE_HEARTBEAT:
2030 		/*
2031 		 * the net is used here so that we can add in the RTO. Even
2032 		 * though we use a different timer. We also add the HB timer
2033 		 * PLUS a random jitter.
2034 		 */
2035 		if ((inp == NULL) || (stcb == NULL)) {
2036 			return;
2037 		} else {
2038 			uint32_t rndval;
2039 			uint8_t this_random;
2040 			int cnt_of_unconf = 0;
2041 			struct sctp_nets *lnet;
2042 
2043 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2044 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2045 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2046 					cnt_of_unconf++;
2047 				}
2048 			}
2049 			if (cnt_of_unconf) {
2050 				net = lnet = NULL;
2051 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
2052 			}
2053 			if (stcb->asoc.hb_random_idx > 3) {
2054 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2055 				memcpy(stcb->asoc.hb_random_values, &rndval,
2056 				    sizeof(stcb->asoc.hb_random_values));
2057 				stcb->asoc.hb_random_idx = 0;
2058 			}
2059 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
2060 			stcb->asoc.hb_random_idx++;
2061 			stcb->asoc.hb_ect_randombit = 0;
2062 			/*
2063 			 * this_random will be 0 - 256 ms RTO is in ms.
2064 			 */
2065 			if ((stcb->asoc.hb_is_disabled) &&
2066 			    (cnt_of_unconf == 0)) {
2067 				return;
2068 			}
2069 			if (net) {
2070 				int delay;
2071 
2072 				delay = stcb->asoc.heart_beat_delay;
2073 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2074 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2075 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2076 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2077 						delay = 0;
2078 					}
2079 				}
2080 				if (net->RTO == 0) {
2081 					/* Never been checked */
2082 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2083 				} else {
2084 					/* set rto_val to the ms */
2085 					to_ticks = delay + net->RTO + this_random;
2086 				}
2087 			} else {
2088 				if (cnt_of_unconf) {
2089 					to_ticks = this_random + stcb->asoc.initial_rto;
2090 				} else {
2091 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2092 				}
2093 			}
2094 			/*
2095 			 * Now we must convert the to_ticks that are now in
2096 			 * ms to ticks.
2097 			 */
2098 			to_ticks = MSEC_TO_TICKS(to_ticks);
2099 			tmr = &stcb->asoc.hb_timer;
2100 		}
2101 		break;
2102 	case SCTP_TIMER_TYPE_COOKIE:
2103 		/*
2104 		 * Here we can use the RTO timer from the network since one
2105 		 * RTT was compelete. If a retran happened then we will be
2106 		 * using the RTO initial value.
2107 		 */
2108 		if ((stcb == NULL) || (net == NULL)) {
2109 			return;
2110 		}
2111 		if (net->RTO == 0) {
2112 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2113 		} else {
2114 			to_ticks = MSEC_TO_TICKS(net->RTO);
2115 		}
2116 		tmr = &net->rxt_timer;
2117 		break;
2118 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2119 		/*
2120 		 * nothing needed but the endpoint here ususually about 60
2121 		 * minutes.
2122 		 */
2123 		if (inp == NULL) {
2124 			return;
2125 		}
2126 		tmr = &inp->sctp_ep.signature_change;
2127 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2128 		break;
2129 	case SCTP_TIMER_TYPE_ASOCKILL:
2130 		if (stcb == NULL) {
2131 			return;
2132 		}
2133 		tmr = &stcb->asoc.strreset_timer;
2134 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2135 		break;
2136 	case SCTP_TIMER_TYPE_INPKILL:
2137 		/*
2138 		 * The inp is setup to die. We re-use the signature_chage
2139 		 * timer since that has stopped and we are in the GONE
2140 		 * state.
2141 		 */
2142 		if (inp == NULL) {
2143 			return;
2144 		}
2145 		tmr = &inp->sctp_ep.signature_change;
2146 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2147 		break;
2148 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2149 		/*
2150 		 * Here we use the value found in the EP for PMTU ususually
2151 		 * about 10 minutes.
2152 		 */
2153 		if ((stcb == NULL) || (inp == NULL)) {
2154 			return;
2155 		}
2156 		if (net == NULL) {
2157 			return;
2158 		}
2159 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2160 		tmr = &net->pmtu_timer;
2161 		break;
2162 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2163 		/* Here we use the RTO of the destination */
2164 		if ((stcb == NULL) || (net == NULL)) {
2165 			return;
2166 		}
2167 		if (net->RTO == 0) {
2168 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2169 		} else {
2170 			to_ticks = MSEC_TO_TICKS(net->RTO);
2171 		}
2172 		tmr = &net->rxt_timer;
2173 		break;
2174 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2175 		/*
2176 		 * Here we use the endpoints shutdown guard timer usually
2177 		 * about 3 minutes.
2178 		 */
2179 		if ((inp == NULL) || (stcb == NULL)) {
2180 			return;
2181 		}
2182 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2183 		tmr = &stcb->asoc.shut_guard_timer;
2184 		break;
2185 	case SCTP_TIMER_TYPE_STRRESET:
2186 		/*
2187 		 * Here the timer comes from the stcb but its value is from
2188 		 * the net's RTO.
2189 		 */
2190 		if ((stcb == NULL) || (net == NULL)) {
2191 			return;
2192 		}
2193 		if (net->RTO == 0) {
2194 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2195 		} else {
2196 			to_ticks = MSEC_TO_TICKS(net->RTO);
2197 		}
2198 		tmr = &stcb->asoc.strreset_timer;
2199 		break;
2200 
2201 	case SCTP_TIMER_TYPE_EARLYFR:
2202 		{
2203 			unsigned int msec;
2204 
2205 			if ((stcb == NULL) || (net == NULL)) {
2206 				return;
2207 			}
2208 			if (net->flight_size > net->cwnd) {
2209 				/* no need to start */
2210 				return;
2211 			}
2212 			SCTP_STAT_INCR(sctps_earlyfrstart);
2213 			if (net->lastsa == 0) {
2214 				/* Hmm no rtt estimate yet? */
2215 				msec = stcb->asoc.initial_rto >> 2;
2216 			} else {
2217 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2218 			}
2219 			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
2220 				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
2221 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2222 					msec = SCTP_MINFR_MSEC_FLOOR;
2223 				}
2224 			}
2225 			to_ticks = MSEC_TO_TICKS(msec);
2226 			tmr = &net->fr_timer;
2227 		}
2228 		break;
2229 	case SCTP_TIMER_TYPE_ASCONF:
2230 		/*
2231 		 * Here the timer comes from the stcb but its value is from
2232 		 * the net's RTO.
2233 		 */
2234 		if ((stcb == NULL) || (net == NULL)) {
2235 			return;
2236 		}
2237 		if (net->RTO == 0) {
2238 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2239 		} else {
2240 			to_ticks = MSEC_TO_TICKS(net->RTO);
2241 		}
2242 		tmr = &stcb->asoc.asconf_timer;
2243 		break;
2244 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2245 		if ((stcb == NULL) || (net != NULL)) {
2246 			return;
2247 		}
2248 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2249 		tmr = &stcb->asoc.delete_prim_timer;
2250 		break;
2251 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2252 		if (stcb == NULL) {
2253 			return;
2254 		}
2255 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2256 			/*
2257 			 * Really an error since stcb is NOT set to
2258 			 * autoclose
2259 			 */
2260 			return;
2261 		}
2262 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2263 		tmr = &stcb->asoc.autoclose_timer;
2264 		break;
2265 	default:
2266 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2267 		    __FUNCTION__, t_type);
2268 		return;
2269 		break;
2270 	};
2271 	if ((to_ticks <= 0) || (tmr == NULL)) {
2272 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2273 		    __FUNCTION__, t_type, to_ticks, tmr);
2274 		return;
2275 	}
2276 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2277 		/*
2278 		 * we do NOT allow you to have it already running. if it is
2279 		 * we leave the current one up unchanged
2280 		 */
2281 		return;
2282 	}
2283 	/* At this point we can proceed */
2284 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2285 		stcb->asoc.num_send_timers_up++;
2286 	}
2287 	tmr->stopped_from = 0;
2288 	tmr->type = t_type;
2289 	tmr->ep = (void *)inp;
2290 	tmr->tcb = (void *)stcb;
2291 	tmr->net = (void *)net;
2292 	tmr->self = (void *)tmr;
2293 	tmr->vnet = (void *)curvnet;
2294 	tmr->ticks = sctp_get_tick_count();
2295 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2296 	return;
2297 }
2298 
2299 void
2300 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2301     struct sctp_nets *net, uint32_t from)
2302 {
2303 	struct sctp_timer *tmr;
2304 
2305 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2306 	    (inp == NULL))
2307 		return;
2308 
2309 	tmr = NULL;
2310 	if (stcb) {
2311 		SCTP_TCB_LOCK_ASSERT(stcb);
2312 	}
2313 	switch (t_type) {
2314 	case SCTP_TIMER_TYPE_ZERO_COPY:
2315 		tmr = &inp->sctp_ep.zero_copy_timer;
2316 		break;
2317 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2318 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2319 		break;
2320 	case SCTP_TIMER_TYPE_ADDR_WQ:
2321 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2322 		break;
2323 	case SCTP_TIMER_TYPE_EARLYFR:
2324 		if ((stcb == NULL) || (net == NULL)) {
2325 			return;
2326 		}
2327 		tmr = &net->fr_timer;
2328 		SCTP_STAT_INCR(sctps_earlyfrstop);
2329 		break;
2330 	case SCTP_TIMER_TYPE_SEND:
2331 		if ((stcb == NULL) || (net == NULL)) {
2332 			return;
2333 		}
2334 		tmr = &net->rxt_timer;
2335 		break;
2336 	case SCTP_TIMER_TYPE_INIT:
2337 		if ((stcb == NULL) || (net == NULL)) {
2338 			return;
2339 		}
2340 		tmr = &net->rxt_timer;
2341 		break;
2342 	case SCTP_TIMER_TYPE_RECV:
2343 		if (stcb == NULL) {
2344 			return;
2345 		}
2346 		tmr = &stcb->asoc.dack_timer;
2347 		break;
2348 	case SCTP_TIMER_TYPE_SHUTDOWN:
2349 		if ((stcb == NULL) || (net == NULL)) {
2350 			return;
2351 		}
2352 		tmr = &net->rxt_timer;
2353 		break;
2354 	case SCTP_TIMER_TYPE_HEARTBEAT:
2355 		if (stcb == NULL) {
2356 			return;
2357 		}
2358 		tmr = &stcb->asoc.hb_timer;
2359 		break;
2360 	case SCTP_TIMER_TYPE_COOKIE:
2361 		if ((stcb == NULL) || (net == NULL)) {
2362 			return;
2363 		}
2364 		tmr = &net->rxt_timer;
2365 		break;
2366 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2367 		/* nothing needed but the endpoint here */
2368 		tmr = &inp->sctp_ep.signature_change;
2369 		/*
2370 		 * We re-use the newcookie timer for the INP kill timer. We
2371 		 * must assure that we do not kill it by accident.
2372 		 */
2373 		break;
2374 	case SCTP_TIMER_TYPE_ASOCKILL:
2375 		/*
2376 		 * Stop the asoc kill timer.
2377 		 */
2378 		if (stcb == NULL) {
2379 			return;
2380 		}
2381 		tmr = &stcb->asoc.strreset_timer;
2382 		break;
2383 
2384 	case SCTP_TIMER_TYPE_INPKILL:
2385 		/*
2386 		 * The inp is setup to die. We re-use the signature_chage
2387 		 * timer since that has stopped and we are in the GONE
2388 		 * state.
2389 		 */
2390 		tmr = &inp->sctp_ep.signature_change;
2391 		break;
2392 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2393 		if ((stcb == NULL) || (net == NULL)) {
2394 			return;
2395 		}
2396 		tmr = &net->pmtu_timer;
2397 		break;
2398 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2399 		if ((stcb == NULL) || (net == NULL)) {
2400 			return;
2401 		}
2402 		tmr = &net->rxt_timer;
2403 		break;
2404 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2405 		if (stcb == NULL) {
2406 			return;
2407 		}
2408 		tmr = &stcb->asoc.shut_guard_timer;
2409 		break;
2410 	case SCTP_TIMER_TYPE_STRRESET:
2411 		if (stcb == NULL) {
2412 			return;
2413 		}
2414 		tmr = &stcb->asoc.strreset_timer;
2415 		break;
2416 	case SCTP_TIMER_TYPE_ASCONF:
2417 		if (stcb == NULL) {
2418 			return;
2419 		}
2420 		tmr = &stcb->asoc.asconf_timer;
2421 		break;
2422 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2423 		if (stcb == NULL) {
2424 			return;
2425 		}
2426 		tmr = &stcb->asoc.delete_prim_timer;
2427 		break;
2428 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2429 		if (stcb == NULL) {
2430 			return;
2431 		}
2432 		tmr = &stcb->asoc.autoclose_timer;
2433 		break;
2434 	default:
2435 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2436 		    __FUNCTION__, t_type);
2437 		break;
2438 	};
2439 	if (tmr == NULL) {
2440 		return;
2441 	}
2442 	if ((tmr->type != t_type) && tmr->type) {
2443 		/*
2444 		 * Ok we have a timer that is under joint use. Cookie timer
2445 		 * per chance with the SEND timer. We therefore are NOT
2446 		 * running the timer that the caller wants stopped.  So just
2447 		 * return.
2448 		 */
2449 		return;
2450 	}
2451 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2452 		stcb->asoc.num_send_timers_up--;
2453 		if (stcb->asoc.num_send_timers_up < 0) {
2454 			stcb->asoc.num_send_timers_up = 0;
2455 		}
2456 	}
2457 	tmr->self = NULL;
2458 	tmr->stopped_from = from;
2459 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2460 	return;
2461 }
2462 
2463 uint32_t
2464 sctp_calculate_len(struct mbuf *m)
2465 {
2466 	uint32_t tlen = 0;
2467 	struct mbuf *at;
2468 
2469 	at = m;
2470 	while (at) {
2471 		tlen += SCTP_BUF_LEN(at);
2472 		at = SCTP_BUF_NEXT(at);
2473 	}
2474 	return (tlen);
2475 }
2476 
2477 void
2478 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2479     struct sctp_association *asoc, uint32_t mtu)
2480 {
2481 	/*
2482 	 * Reset the P-MTU size on this association, this involves changing
2483 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2484 	 * allow the DF flag to be cleared.
2485 	 */
2486 	struct sctp_tmit_chunk *chk;
2487 	unsigned int eff_mtu, ovh;
2488 
2489 #ifdef SCTP_PRINT_FOR_B_AND_M
2490 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2491 	    inp, asoc, mtu);
2492 #endif
2493 	asoc->smallest_mtu = mtu;
2494 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2495 		ovh = SCTP_MIN_OVERHEAD;
2496 	} else {
2497 		ovh = SCTP_MIN_V4_OVERHEAD;
2498 	}
2499 	eff_mtu = mtu - ovh;
2500 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2501 
2502 		if (chk->send_size > eff_mtu) {
2503 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2504 		}
2505 	}
2506 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2507 		if (chk->send_size > eff_mtu) {
2508 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2509 		}
2510 	}
2511 }
2512 
2513 
2514 /*
2515  * given an association and starting time of the current RTT period return
2516  * RTO in number of msecs net should point to the current network
2517  */
2518 uint32_t
2519 sctp_calculate_rto(struct sctp_tcb *stcb,
2520     struct sctp_association *asoc,
2521     struct sctp_nets *net,
2522     struct timeval *told,
2523     int safe)
2524 {
2525 	/*-
2526 	 * given an association and the starting time of the current RTT
2527 	 * period (in value1/value2) return RTO in number of msecs.
2528 	 */
2529 	int calc_time = 0;
2530 	int o_calctime;
2531 	uint32_t new_rto = 0;
2532 	int first_measure = 0;
2533 	struct timeval now, then, *old;
2534 
2535 	/* Copy it out for sparc64 */
2536 	if (safe == sctp_align_unsafe_makecopy) {
2537 		old = &then;
2538 		memcpy(&then, told, sizeof(struct timeval));
2539 	} else if (safe == sctp_align_safe_nocopy) {
2540 		old = told;
2541 	} else {
2542 		/* error */
2543 		SCTP_PRINTF("Huh, bad rto calc call\n");
2544 		return (0);
2545 	}
2546 	/************************/
2547 	/* 1. calculate new RTT */
2548 	/************************/
2549 	/* get the current time */
2550 	(void)SCTP_GETTIME_TIMEVAL(&now);
2551 	/* compute the RTT value */
2552 	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
2553 		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
2554 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2555 			calc_time += (((u_long)now.tv_usec -
2556 			    (u_long)old->tv_usec) / 1000);
2557 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2558 			/* Borrow 1,000ms from current calculation */
2559 			calc_time -= 1000;
2560 			/* Add in the slop over */
2561 			calc_time += ((int)now.tv_usec / 1000);
2562 			/* Add in the pre-second ms's */
2563 			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
2564 		}
2565 	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
2566 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2567 			calc_time = ((u_long)now.tv_usec -
2568 			    (u_long)old->tv_usec) / 1000;
2569 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2570 			/* impossible .. garbage in nothing out */
2571 			goto calc_rto;
2572 		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
2573 			/*
2574 			 * We have to have 1 usec :-D this must be the
2575 			 * loopback.
2576 			 */
2577 			calc_time = 1;
2578 		} else {
2579 			/* impossible .. garbage in nothing out */
2580 			goto calc_rto;
2581 		}
2582 	} else {
2583 		/* Clock wrapped? */
2584 		goto calc_rto;
2585 	}
2586 	/***************************/
2587 	/* 2. update RTTVAR & SRTT */
2588 	/***************************/
2589 	net->rtt = o_calctime = calc_time;
2590 	/* this is Van Jacobson's integer version */
2591 	if (net->RTO_measured) {
2592 		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
2593 								 * shift=3 */
2594 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2595 			rto_logging(net, SCTP_LOG_RTTVAR);
2596 		}
2597 		net->prev_rtt = o_calctime;
2598 		net->lastsa += calc_time;	/* add 7/8th into sa when
2599 						 * shift=3 */
2600 		if (calc_time < 0) {
2601 			calc_time = -calc_time;
2602 		}
2603 		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
2604 									 * VAR shift=2 */
2605 		net->lastsv += calc_time;
2606 		if (net->lastsv == 0) {
2607 			net->lastsv = SCTP_CLOCK_GRANULARITY;
2608 		}
2609 	} else {
2610 		/* First RTO measurment */
2611 		net->RTO_measured = 1;
2612 		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
2613 								 * shift=3 */
2614 		net->lastsv = calc_time;
2615 		if (net->lastsv == 0) {
2616 			net->lastsv = SCTP_CLOCK_GRANULARITY;
2617 		}
2618 		first_measure = 1;
2619 		net->prev_rtt = o_calctime;
2620 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2621 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2622 		}
2623 	}
2624 calc_rto:
2625 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2626 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2627 	    (stcb->asoc.sat_network_lockout == 0)) {
2628 		stcb->asoc.sat_network = 1;
2629 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2630 		stcb->asoc.sat_network = 0;
2631 		stcb->asoc.sat_network_lockout = 1;
2632 	}
2633 	/* bound it, per C6/C7 in Section 5.3.1 */
2634 	if (new_rto < stcb->asoc.minrto) {
2635 		new_rto = stcb->asoc.minrto;
2636 	}
2637 	if (new_rto > stcb->asoc.maxrto) {
2638 		new_rto = stcb->asoc.maxrto;
2639 	}
2640 	/* we are now returning the RTO */
2641 	return (new_rto);
2642 }
2643 
2644 /*
2645  * return a pointer to a contiguous piece of data from the given mbuf chain
2646  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2647  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2648  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2649  */
2650 caddr_t
2651 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2652 {
2653 	uint32_t count;
2654 	uint8_t *ptr;
2655 
2656 	ptr = in_ptr;
2657 	if ((off < 0) || (len <= 0))
2658 		return (NULL);
2659 
2660 	/* find the desired start location */
2661 	while ((m != NULL) && (off > 0)) {
2662 		if (off < SCTP_BUF_LEN(m))
2663 			break;
2664 		off -= SCTP_BUF_LEN(m);
2665 		m = SCTP_BUF_NEXT(m);
2666 	}
2667 	if (m == NULL)
2668 		return (NULL);
2669 
2670 	/* is the current mbuf large enough (eg. contiguous)? */
2671 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2672 		return (mtod(m, caddr_t)+off);
2673 	} else {
2674 		/* else, it spans more than one mbuf, so save a temp copy... */
2675 		while ((m != NULL) && (len > 0)) {
2676 			count = min(SCTP_BUF_LEN(m) - off, len);
2677 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2678 			len -= count;
2679 			ptr += count;
2680 			off = 0;
2681 			m = SCTP_BUF_NEXT(m);
2682 		}
2683 		if ((m == NULL) && (len > 0))
2684 			return (NULL);
2685 		else
2686 			return ((caddr_t)in_ptr);
2687 	}
2688 }
2689 
2690 
2691 
2692 struct sctp_paramhdr *
2693 sctp_get_next_param(struct mbuf *m,
2694     int offset,
2695     struct sctp_paramhdr *pull,
2696     int pull_limit)
2697 {
2698 	/* This just provides a typed signature to Peter's Pull routine */
2699 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2700 	    (uint8_t *) pull));
2701 }
2702 
2703 
2704 int
2705 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2706 {
2707 	/*
2708 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2709 	 * padlen is > 3 this routine will fail.
2710 	 */
2711 	uint8_t *dp;
2712 	int i;
2713 
2714 	if (padlen > 3) {
2715 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2716 		return (ENOBUFS);
2717 	}
2718 	if (padlen <= M_TRAILINGSPACE(m)) {
2719 		/*
2720 		 * The easy way. We hope the majority of the time we hit
2721 		 * here :)
2722 		 */
2723 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2724 		SCTP_BUF_LEN(m) += padlen;
2725 	} else {
2726 		/* Hard way we must grow the mbuf */
2727 		struct mbuf *tmp;
2728 
2729 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2730 		if (tmp == NULL) {
2731 			/* Out of space GAK! we are in big trouble. */
2732 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2733 			return (ENOSPC);
2734 		}
2735 		/* setup and insert in middle */
2736 		SCTP_BUF_LEN(tmp) = padlen;
2737 		SCTP_BUF_NEXT(tmp) = NULL;
2738 		SCTP_BUF_NEXT(m) = tmp;
2739 		dp = mtod(tmp, uint8_t *);
2740 	}
2741 	/* zero out the pad */
2742 	for (i = 0; i < padlen; i++) {
2743 		*dp = 0;
2744 		dp++;
2745 	}
2746 	return (0);
2747 }
2748 
2749 int
2750 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2751 {
2752 	/* find the last mbuf in chain and pad it */
2753 	struct mbuf *m_at;
2754 
2755 	m_at = m;
2756 	if (last_mbuf) {
2757 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2758 	} else {
2759 		while (m_at) {
2760 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2761 				return (sctp_add_pad_tombuf(m_at, padval));
2762 			}
2763 			m_at = SCTP_BUF_NEXT(m_at);
2764 		}
2765 	}
2766 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2767 	return (EFAULT);
2768 }
2769 
2770 static void
2771 sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
2772     uint32_t error, void *data, int so_locked
2773 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2774     SCTP_UNUSED
2775 #endif
2776 )
2777 {
2778 	struct mbuf *m_notify;
2779 	struct sctp_assoc_change *sac;
2780 	struct sctp_queued_to_read *control;
2781 
2782 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2783 	struct socket *so;
2784 
2785 #endif
2786 
2787 	/*
2788 	 * For TCP model AND UDP connected sockets we will send an error up
2789 	 * when an ABORT comes in.
2790 	 */
2791 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2792 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2793 	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
2794 		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2795 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2796 			stcb->sctp_socket->so_error = ECONNREFUSED;
2797 		} else {
2798 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2799 			stcb->sctp_socket->so_error = ECONNRESET;
2800 		}
2801 		/* Wake ANY sleepers */
2802 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2803 		so = SCTP_INP_SO(stcb->sctp_ep);
2804 		if (!so_locked) {
2805 			atomic_add_int(&stcb->asoc.refcnt, 1);
2806 			SCTP_TCB_UNLOCK(stcb);
2807 			SCTP_SOCKET_LOCK(so, 1);
2808 			SCTP_TCB_LOCK(stcb);
2809 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
2810 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2811 				SCTP_SOCKET_UNLOCK(so, 1);
2812 				return;
2813 			}
2814 		}
2815 #endif
2816 		sorwakeup(stcb->sctp_socket);
2817 		sowwakeup(stcb->sctp_socket);
2818 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2819 		if (!so_locked) {
2820 			SCTP_SOCKET_UNLOCK(so, 1);
2821 		}
2822 #endif
2823 	}
2824 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2825 		/* event not enabled */
2826 		return;
2827 	}
2828 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
2829 	if (m_notify == NULL)
2830 		/* no space left */
2831 		return;
2832 	SCTP_BUF_LEN(m_notify) = 0;
2833 
2834 	sac = mtod(m_notify, struct sctp_assoc_change *);
2835 	sac->sac_type = SCTP_ASSOC_CHANGE;
2836 	sac->sac_flags = 0;
2837 	sac->sac_length = sizeof(struct sctp_assoc_change);
2838 	sac->sac_state = event;
2839 	sac->sac_error = error;
2840 	/* XXX verify these stream counts */
2841 	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2842 	sac->sac_inbound_streams = stcb->asoc.streamincnt;
2843 	sac->sac_assoc_id = sctp_get_associd(stcb);
2844 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
2845 	SCTP_BUF_NEXT(m_notify) = NULL;
2846 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2847 	    0, 0, 0, 0, 0, 0,
2848 	    m_notify);
2849 	if (control == NULL) {
2850 		/* no memory */
2851 		sctp_m_freem(m_notify);
2852 		return;
2853 	}
2854 	control->length = SCTP_BUF_LEN(m_notify);
2855 	/* not that we need this */
2856 	control->tail_mbuf = m_notify;
2857 	control->spec_flags = M_NOTIFICATION;
2858 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2859 	    control,
2860 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2861 	    so_locked);
2862 	if (event == SCTP_COMM_LOST) {
2863 		/* Wake up any sleeper */
2864 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2865 		so = SCTP_INP_SO(stcb->sctp_ep);
2866 		if (!so_locked) {
2867 			atomic_add_int(&stcb->asoc.refcnt, 1);
2868 			SCTP_TCB_UNLOCK(stcb);
2869 			SCTP_SOCKET_LOCK(so, 1);
2870 			SCTP_TCB_LOCK(stcb);
2871 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
2872 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2873 				SCTP_SOCKET_UNLOCK(so, 1);
2874 				return;
2875 			}
2876 		}
2877 #endif
2878 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
2879 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2880 		if (!so_locked) {
2881 			SCTP_SOCKET_UNLOCK(so, 1);
2882 		}
2883 #endif
2884 	}
2885 }
2886 
2887 static void
2888 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2889     struct sockaddr *sa, uint32_t error)
2890 {
2891 	struct mbuf *m_notify;
2892 	struct sctp_paddr_change *spc;
2893 	struct sctp_queued_to_read *control;
2894 
2895 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2896 		/* event not enabled */
2897 		return;
2898 	}
2899 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2900 	if (m_notify == NULL)
2901 		return;
2902 	SCTP_BUF_LEN(m_notify) = 0;
2903 	spc = mtod(m_notify, struct sctp_paddr_change *);
2904 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2905 	spc->spc_flags = 0;
2906 	spc->spc_length = sizeof(struct sctp_paddr_change);
2907 	switch (sa->sa_family) {
2908 	case AF_INET:
2909 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2910 		break;
2911 #ifdef INET6
2912 	case AF_INET6:
2913 		{
2914 			struct sockaddr_in6 *sin6;
2915 
2916 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2917 
2918 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2919 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2920 				if (sin6->sin6_scope_id == 0) {
2921 					/* recover scope_id for user */
2922 					(void)sa6_recoverscope(sin6);
2923 				} else {
2924 					/* clear embedded scope_id for user */
2925 					in6_clearscope(&sin6->sin6_addr);
2926 				}
2927 			}
2928 			break;
2929 		}
2930 #endif
2931 	default:
2932 		/* TSNH */
2933 		break;
2934 	}
2935 	spc->spc_state = state;
2936 	spc->spc_error = error;
2937 	spc->spc_assoc_id = sctp_get_associd(stcb);
2938 
2939 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2940 	SCTP_BUF_NEXT(m_notify) = NULL;
2941 
2942 	/* append to socket */
2943 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2944 	    0, 0, 0, 0, 0, 0,
2945 	    m_notify);
2946 	if (control == NULL) {
2947 		/* no memory */
2948 		sctp_m_freem(m_notify);
2949 		return;
2950 	}
2951 	control->length = SCTP_BUF_LEN(m_notify);
2952 	control->spec_flags = M_NOTIFICATION;
2953 	/* not that we need this */
2954 	control->tail_mbuf = m_notify;
2955 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2956 	    control,
2957 	    &stcb->sctp_socket->so_rcv, 1,
2958 	    SCTP_READ_LOCK_NOT_HELD,
2959 	    SCTP_SO_NOT_LOCKED);
2960 }
2961 
2962 
2963 static void
2964 sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
2965     struct sctp_tmit_chunk *chk, int so_locked
2966 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2967     SCTP_UNUSED
2968 #endif
2969 )
2970 {
2971 	struct mbuf *m_notify;
2972 	struct sctp_send_failed *ssf;
2973 	struct sctp_queued_to_read *control;
2974 	int length;
2975 
2976 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
2977 		/* event not enabled */
2978 		return;
2979 	}
2980 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
2981 	if (m_notify == NULL)
2982 		/* no space left */
2983 		return;
2984 	length = sizeof(struct sctp_send_failed) + chk->send_size;
2985 	length -= sizeof(struct sctp_data_chunk);
2986 	SCTP_BUF_LEN(m_notify) = 0;
2987 	ssf = mtod(m_notify, struct sctp_send_failed *);
2988 	ssf->ssf_type = SCTP_SEND_FAILED;
2989 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
2990 		ssf->ssf_flags = SCTP_DATA_UNSENT;
2991 	else
2992 		ssf->ssf_flags = SCTP_DATA_SENT;
2993 	ssf->ssf_length = length;
2994 	ssf->ssf_error = error;
2995 	/* not exactly what the user sent in, but should be close :) */
2996 	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
2997 	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2998 	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2999 	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
3000 	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
3001 	ssf->ssf_info.sinfo_context = chk->rec.data.context;
3002 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3003 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
3004 
3005 	if (chk->data) {
3006 		/*
3007 		 * trim off the sctp chunk header(it should be there)
3008 		 */
3009 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
3010 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
3011 			sctp_mbuf_crush(chk->data);
3012 			chk->send_size -= sizeof(struct sctp_data_chunk);
3013 		}
3014 	}
3015 	SCTP_BUF_NEXT(m_notify) = chk->data;
3016 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3017 	/* Steal off the mbuf */
3018 	chk->data = NULL;
3019 	/*
3020 	 * For this case, we check the actual socket buffer, since the assoc
3021 	 * is going away we don't want to overfill the socket buffer for a
3022 	 * non-reader
3023 	 */
3024 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3025 		sctp_m_freem(m_notify);
3026 		return;
3027 	}
3028 	/* append to socket */
3029 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3030 	    0, 0, 0, 0, 0, 0,
3031 	    m_notify);
3032 	if (control == NULL) {
3033 		/* no memory */
3034 		sctp_m_freem(m_notify);
3035 		return;
3036 	}
3037 	control->spec_flags = M_NOTIFICATION;
3038 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3039 	    control,
3040 	    &stcb->sctp_socket->so_rcv, 1,
3041 	    SCTP_READ_LOCK_NOT_HELD,
3042 	    so_locked);
3043 }
3044 
3045 
3046 static void
3047 sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
3048     struct sctp_stream_queue_pending *sp, int so_locked
3049 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3050     SCTP_UNUSED
3051 #endif
3052 )
3053 {
3054 	struct mbuf *m_notify;
3055 	struct sctp_send_failed *ssf;
3056 	struct sctp_queued_to_read *control;
3057 	int length;
3058 
3059 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
3060 		/* event not enabled */
3061 		return;
3062 	}
3063 	length = sizeof(struct sctp_send_failed) + sp->length;
3064 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
3065 	if (m_notify == NULL)
3066 		/* no space left */
3067 		return;
3068 	SCTP_BUF_LEN(m_notify) = 0;
3069 	ssf = mtod(m_notify, struct sctp_send_failed *);
3070 	ssf->ssf_type = SCTP_SEND_FAILED;
3071 	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
3072 		ssf->ssf_flags = SCTP_DATA_UNSENT;
3073 	else
3074 		ssf->ssf_flags = SCTP_DATA_SENT;
3075 	ssf->ssf_length = length;
3076 	ssf->ssf_error = error;
3077 	/* not exactly what the user sent in, but should be close :) */
3078 	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
3079 	ssf->ssf_info.sinfo_stream = sp->stream;
3080 	ssf->ssf_info.sinfo_ssn = sp->strseq;
3081 	if (sp->some_taken) {
3082 		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
3083 	} else {
3084 		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
3085 	}
3086 	ssf->ssf_info.sinfo_ppid = sp->ppid;
3087 	ssf->ssf_info.sinfo_context = sp->context;
3088 	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
3089 	ssf->ssf_assoc_id = sctp_get_associd(stcb);
3090 	SCTP_BUF_NEXT(m_notify) = sp->data;
3091 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
3092 
3093 	/* Steal off the mbuf */
3094 	sp->data = NULL;
3095 	/*
3096 	 * For this case, we check the actual socket buffer, since the assoc
3097 	 * is going away we don't want to overfill the socket buffer for a
3098 	 * non-reader
3099 	 */
3100 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3101 		sctp_m_freem(m_notify);
3102 		return;
3103 	}
3104 	/* append to socket */
3105 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3106 	    0, 0, 0, 0, 0, 0,
3107 	    m_notify);
3108 	if (control == NULL) {
3109 		/* no memory */
3110 		sctp_m_freem(m_notify);
3111 		return;
3112 	}
3113 	control->spec_flags = M_NOTIFICATION;
3114 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3115 	    control,
3116 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3117 }
3118 
3119 
3120 
3121 static void
3122 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3123     uint32_t error)
3124 {
3125 	struct mbuf *m_notify;
3126 	struct sctp_adaptation_event *sai;
3127 	struct sctp_queued_to_read *control;
3128 
3129 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3130 		/* event not enabled */
3131 		return;
3132 	}
3133 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3134 	if (m_notify == NULL)
3135 		/* no space left */
3136 		return;
3137 	SCTP_BUF_LEN(m_notify) = 0;
3138 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3139 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3140 	sai->sai_flags = 0;
3141 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3142 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3143 	sai->sai_assoc_id = sctp_get_associd(stcb);
3144 
3145 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3146 	SCTP_BUF_NEXT(m_notify) = NULL;
3147 
3148 	/* append to socket */
3149 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3150 	    0, 0, 0, 0, 0, 0,
3151 	    m_notify);
3152 	if (control == NULL) {
3153 		/* no memory */
3154 		sctp_m_freem(m_notify);
3155 		return;
3156 	}
3157 	control->length = SCTP_BUF_LEN(m_notify);
3158 	control->spec_flags = M_NOTIFICATION;
3159 	/* not that we need this */
3160 	control->tail_mbuf = m_notify;
3161 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3162 	    control,
3163 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3164 }
3165 
3166 /* This always must be called with the read-queue LOCKED in the INP */
3167 static void
3168 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3169     uint32_t val, int so_locked
3170 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3171     SCTP_UNUSED
3172 #endif
3173 )
3174 {
3175 	struct mbuf *m_notify;
3176 	struct sctp_pdapi_event *pdapi;
3177 	struct sctp_queued_to_read *control;
3178 	struct sockbuf *sb;
3179 
3180 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3181 		/* event not enabled */
3182 		return;
3183 	}
3184 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3185 	if (m_notify == NULL)
3186 		/* no space left */
3187 		return;
3188 	SCTP_BUF_LEN(m_notify) = 0;
3189 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3190 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3191 	pdapi->pdapi_flags = 0;
3192 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3193 	pdapi->pdapi_indication = error;
3194 	pdapi->pdapi_stream = (val >> 16);
3195 	pdapi->pdapi_seq = (val & 0x0000ffff);
3196 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3197 
3198 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3199 	SCTP_BUF_NEXT(m_notify) = NULL;
3200 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3201 	    0, 0, 0, 0, 0, 0,
3202 	    m_notify);
3203 	if (control == NULL) {
3204 		/* no memory */
3205 		sctp_m_freem(m_notify);
3206 		return;
3207 	}
3208 	control->spec_flags = M_NOTIFICATION;
3209 	control->length = SCTP_BUF_LEN(m_notify);
3210 	/* not that we need this */
3211 	control->tail_mbuf = m_notify;
3212 	control->held_length = 0;
3213 	control->length = 0;
3214 	sb = &stcb->sctp_socket->so_rcv;
3215 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3216 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3217 	}
3218 	sctp_sballoc(stcb, sb, m_notify);
3219 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3220 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3221 	}
3222 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3223 	control->end_added = 1;
3224 	if (stcb->asoc.control_pdapi)
3225 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3226 	else {
3227 		/* we really should not see this case */
3228 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3229 	}
3230 	if (stcb->sctp_ep && stcb->sctp_socket) {
3231 		/* This should always be the case */
3232 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3233 		struct socket *so;
3234 
3235 		so = SCTP_INP_SO(stcb->sctp_ep);
3236 		if (!so_locked) {
3237 			atomic_add_int(&stcb->asoc.refcnt, 1);
3238 			SCTP_TCB_UNLOCK(stcb);
3239 			SCTP_SOCKET_LOCK(so, 1);
3240 			SCTP_TCB_LOCK(stcb);
3241 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3242 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3243 				SCTP_SOCKET_UNLOCK(so, 1);
3244 				return;
3245 			}
3246 		}
3247 #endif
3248 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3249 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3250 		if (!so_locked) {
3251 			SCTP_SOCKET_UNLOCK(so, 1);
3252 		}
3253 #endif
3254 	}
3255 }
3256 
3257 static void
3258 sctp_notify_shutdown_event(struct sctp_tcb *stcb)
3259 {
3260 	struct mbuf *m_notify;
3261 	struct sctp_shutdown_event *sse;
3262 	struct sctp_queued_to_read *control;
3263 
3264 	/*
3265 	 * For TCP model AND UDP connected sockets we will send an error up
3266 	 * when an SHUTDOWN completes
3267 	 */
3268 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3269 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3270 		/* mark socket closed for read/write and wakeup! */
3271 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3272 		struct socket *so;
3273 
3274 		so = SCTP_INP_SO(stcb->sctp_ep);
3275 		atomic_add_int(&stcb->asoc.refcnt, 1);
3276 		SCTP_TCB_UNLOCK(stcb);
3277 		SCTP_SOCKET_LOCK(so, 1);
3278 		SCTP_TCB_LOCK(stcb);
3279 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3280 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3281 			SCTP_SOCKET_UNLOCK(so, 1);
3282 			return;
3283 		}
3284 #endif
3285 		socantsendmore(stcb->sctp_socket);
3286 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3287 		SCTP_SOCKET_UNLOCK(so, 1);
3288 #endif
3289 	}
3290 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
3291 		/* event not enabled */
3292 		return;
3293 	}
3294 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
3295 	if (m_notify == NULL)
3296 		/* no space left */
3297 		return;
3298 	sse = mtod(m_notify, struct sctp_shutdown_event *);
3299 	sse->sse_type = SCTP_SHUTDOWN_EVENT;
3300 	sse->sse_flags = 0;
3301 	sse->sse_length = sizeof(struct sctp_shutdown_event);
3302 	sse->sse_assoc_id = sctp_get_associd(stcb);
3303 
3304 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
3305 	SCTP_BUF_NEXT(m_notify) = NULL;
3306 
3307 	/* append to socket */
3308 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3309 	    0, 0, 0, 0, 0, 0,
3310 	    m_notify);
3311 	if (control == NULL) {
3312 		/* no memory */
3313 		sctp_m_freem(m_notify);
3314 		return;
3315 	}
3316 	control->spec_flags = M_NOTIFICATION;
3317 	control->length = SCTP_BUF_LEN(m_notify);
3318 	/* not that we need this */
3319 	control->tail_mbuf = m_notify;
3320 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3321 	    control,
3322 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3323 }
3324 
3325 static void
3326 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3327     int so_locked
3328 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3329     SCTP_UNUSED
3330 #endif
3331 )
3332 {
3333 	struct mbuf *m_notify;
3334 	struct sctp_sender_dry_event *event;
3335 	struct sctp_queued_to_read *control;
3336 
3337 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3338 		/* event not enabled */
3339 		return;
3340 	}
3341 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3342 	if (m_notify == NULL) {
3343 		/* no space left */
3344 		return;
3345 	}
3346 	SCTP_BUF_LEN(m_notify) = 0;
3347 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3348 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3349 	event->sender_dry_flags = 0;
3350 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3351 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3352 
3353 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3354 	SCTP_BUF_NEXT(m_notify) = NULL;
3355 
3356 	/* append to socket */
3357 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3358 	    0, 0, 0, 0, 0, 0, m_notify);
3359 	if (control == NULL) {
3360 		/* no memory */
3361 		sctp_m_freem(m_notify);
3362 		return;
3363 	}
3364 	control->length = SCTP_BUF_LEN(m_notify);
3365 	control->spec_flags = M_NOTIFICATION;
3366 	/* not that we need this */
3367 	control->tail_mbuf = m_notify;
3368 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3369 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3370 }
3371 
3372 
3373 static void
3374 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3375 {
3376 	struct mbuf *m_notify;
3377 	struct sctp_queued_to_read *control;
3378 	struct sctp_stream_reset_event *strreset;
3379 	int len;
3380 
3381 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3382 		/* event not enabled */
3383 		return;
3384 	}
3385 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3386 	if (m_notify == NULL)
3387 		/* no space left */
3388 		return;
3389 	SCTP_BUF_LEN(m_notify) = 0;
3390 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3391 	if (len > M_TRAILINGSPACE(m_notify)) {
3392 		/* never enough room */
3393 		sctp_m_freem(m_notify);
3394 		return;
3395 	}
3396 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3397 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3398 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3399 	strreset->strreset_length = len;
3400 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3401 	strreset->strreset_list[0] = number_entries;
3402 
3403 	SCTP_BUF_LEN(m_notify) = len;
3404 	SCTP_BUF_NEXT(m_notify) = NULL;
3405 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3406 		/* no space */
3407 		sctp_m_freem(m_notify);
3408 		return;
3409 	}
3410 	/* append to socket */
3411 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3412 	    0, 0, 0, 0, 0, 0,
3413 	    m_notify);
3414 	if (control == NULL) {
3415 		/* no memory */
3416 		sctp_m_freem(m_notify);
3417 		return;
3418 	}
3419 	control->spec_flags = M_NOTIFICATION;
3420 	control->length = SCTP_BUF_LEN(m_notify);
3421 	/* not that we need this */
3422 	control->tail_mbuf = m_notify;
3423 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3424 	    control,
3425 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3426 }
3427 
3428 
3429 static void
3430 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3431     int number_entries, uint16_t * list, int flag)
3432 {
3433 	struct mbuf *m_notify;
3434 	struct sctp_queued_to_read *control;
3435 	struct sctp_stream_reset_event *strreset;
3436 	int len;
3437 
3438 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3439 		/* event not enabled */
3440 		return;
3441 	}
3442 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3443 	if (m_notify == NULL)
3444 		/* no space left */
3445 		return;
3446 	SCTP_BUF_LEN(m_notify) = 0;
3447 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3448 	if (len > M_TRAILINGSPACE(m_notify)) {
3449 		/* never enough room */
3450 		sctp_m_freem(m_notify);
3451 		return;
3452 	}
3453 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3454 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3455 	if (number_entries == 0) {
3456 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3457 	} else {
3458 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3459 	}
3460 	strreset->strreset_length = len;
3461 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3462 	if (number_entries) {
3463 		int i;
3464 
3465 		for (i = 0; i < number_entries; i++) {
3466 			strreset->strreset_list[i] = ntohs(list[i]);
3467 		}
3468 	}
3469 	SCTP_BUF_LEN(m_notify) = len;
3470 	SCTP_BUF_NEXT(m_notify) = NULL;
3471 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3472 		/* no space */
3473 		sctp_m_freem(m_notify);
3474 		return;
3475 	}
3476 	/* append to socket */
3477 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3478 	    0, 0, 0, 0, 0, 0,
3479 	    m_notify);
3480 	if (control == NULL) {
3481 		/* no memory */
3482 		sctp_m_freem(m_notify);
3483 		return;
3484 	}
3485 	control->spec_flags = M_NOTIFICATION;
3486 	control->length = SCTP_BUF_LEN(m_notify);
3487 	/* not that we need this */
3488 	control->tail_mbuf = m_notify;
3489 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3490 	    control,
3491 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3492 }
3493 
3494 
3495 void
3496 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3497     uint32_t error, void *data, int so_locked
3498 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3499     SCTP_UNUSED
3500 #endif
3501 )
3502 {
3503 	if ((stcb == NULL) ||
3504 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3505 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3506 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3507 		/* If the socket is gone we are out of here */
3508 		return;
3509 	}
3510 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3511 		return;
3512 	}
3513 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3514 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3515 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3516 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3517 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3518 			/* Don't report these in front states */
3519 			return;
3520 		}
3521 	}
3522 	switch (notification) {
3523 	case SCTP_NOTIFY_ASSOC_UP:
3524 		if (stcb->asoc.assoc_up_sent == 0) {
3525 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3526 			stcb->asoc.assoc_up_sent = 1;
3527 		}
3528 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3529 			sctp_notify_adaptation_layer(stcb, error);
3530 		}
3531 		if (stcb->asoc.peer_supports_auth == 0) {
3532 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3533 			    NULL, so_locked);
3534 		}
3535 		break;
3536 	case SCTP_NOTIFY_ASSOC_DOWN:
3537 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3538 		break;
3539 	case SCTP_NOTIFY_INTERFACE_DOWN:
3540 		{
3541 			struct sctp_nets *net;
3542 
3543 			net = (struct sctp_nets *)data;
3544 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3545 			    (struct sockaddr *)&net->ro._l_addr, error);
3546 			break;
3547 		}
3548 	case SCTP_NOTIFY_INTERFACE_UP:
3549 		{
3550 			struct sctp_nets *net;
3551 
3552 			net = (struct sctp_nets *)data;
3553 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3554 			    (struct sockaddr *)&net->ro._l_addr, error);
3555 			break;
3556 		}
3557 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3558 		{
3559 			struct sctp_nets *net;
3560 
3561 			net = (struct sctp_nets *)data;
3562 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3563 			    (struct sockaddr *)&net->ro._l_addr, error);
3564 			break;
3565 		}
3566 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3567 		sctp_notify_send_failed2(stcb, error,
3568 		    (struct sctp_stream_queue_pending *)data, so_locked);
3569 		break;
3570 	case SCTP_NOTIFY_DG_FAIL:
3571 		sctp_notify_send_failed(stcb, error,
3572 		    (struct sctp_tmit_chunk *)data, so_locked);
3573 		break;
3574 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3575 		{
3576 			uint32_t val;
3577 
3578 			val = *((uint32_t *) data);
3579 
3580 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3581 			break;
3582 		}
3583 	case SCTP_NOTIFY_STRDATA_ERR:
3584 		break;
3585 	case SCTP_NOTIFY_ASSOC_ABORTED:
3586 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3587 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3588 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3589 		} else {
3590 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3591 		}
3592 		break;
3593 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3594 		break;
3595 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3596 		break;
3597 	case SCTP_NOTIFY_ASSOC_RESTART:
3598 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3599 		if (stcb->asoc.peer_supports_auth == 0) {
3600 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3601 			    NULL, so_locked);
3602 		}
3603 		break;
3604 	case SCTP_NOTIFY_HB_RESP:
3605 		break;
3606 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3607 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3608 		break;
3609 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3610 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3611 		break;
3612 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3613 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3614 		break;
3615 
3616 	case SCTP_NOTIFY_STR_RESET_SEND:
3617 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3618 		break;
3619 	case SCTP_NOTIFY_STR_RESET_RECV:
3620 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3621 		break;
3622 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3623 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3624 		break;
3625 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3626 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3627 		break;
3628 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3629 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3630 		    error);
3631 		break;
3632 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3633 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3634 		    error);
3635 		break;
3636 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3637 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3638 		    error);
3639 		break;
3640 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3641 		break;
3642 	case SCTP_NOTIFY_ASCONF_FAILED:
3643 		break;
3644 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3645 		sctp_notify_shutdown_event(stcb);
3646 		break;
3647 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3648 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3649 		    (uint16_t) (uintptr_t) data,
3650 		    so_locked);
3651 		break;
3652 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3653 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3654 		    (uint16_t) (uintptr_t) data,
3655 		    so_locked);
3656 		break;
3657 	case SCTP_NOTIFY_NO_PEER_AUTH:
3658 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3659 		    (uint16_t) (uintptr_t) data,
3660 		    so_locked);
3661 		break;
3662 	case SCTP_NOTIFY_SENDER_DRY:
3663 		sctp_notify_sender_dry_event(stcb, so_locked);
3664 		break;
3665 	default:
3666 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3667 		    __FUNCTION__, notification, notification);
3668 		break;
3669 	}			/* end switch */
3670 }
3671 
3672 void
3673 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3674 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3675     SCTP_UNUSED
3676 #endif
3677 )
3678 {
3679 	struct sctp_association *asoc;
3680 	struct sctp_stream_out *outs;
3681 	struct sctp_tmit_chunk *chk;
3682 	struct sctp_stream_queue_pending *sp;
3683 	int i;
3684 
3685 	asoc = &stcb->asoc;
3686 
3687 	if (stcb == NULL) {
3688 		return;
3689 	}
3690 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3691 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3692 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3693 		return;
3694 	}
3695 	/* now through all the gunk freeing chunks */
3696 	if (holds_lock == 0) {
3697 		SCTP_TCB_SEND_LOCK(stcb);
3698 	}
3699 	/* sent queue SHOULD be empty */
3700 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3701 		chk = TAILQ_FIRST(&asoc->sent_queue);
3702 		while (chk) {
3703 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3704 			asoc->sent_queue_cnt--;
3705 			if (chk->data != NULL) {
3706 				sctp_free_bufspace(stcb, asoc, chk, 1);
3707 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3708 				    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3709 				if (chk->data) {
3710 					sctp_m_freem(chk->data);
3711 					chk->data = NULL;
3712 				}
3713 			}
3714 			sctp_free_a_chunk(stcb, chk);
3715 			/* sa_ignore FREED_MEMORY */
3716 			chk = TAILQ_FIRST(&asoc->sent_queue);
3717 		}
3718 	}
3719 	/* pending send queue SHOULD be empty */
3720 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3721 		chk = TAILQ_FIRST(&asoc->send_queue);
3722 		while (chk) {
3723 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3724 			asoc->send_queue_cnt--;
3725 			if (chk->data != NULL) {
3726 				sctp_free_bufspace(stcb, asoc, chk, 1);
3727 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3728 				    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3729 				if (chk->data) {
3730 					sctp_m_freem(chk->data);
3731 					chk->data = NULL;
3732 				}
3733 			}
3734 			sctp_free_a_chunk(stcb, chk);
3735 			/* sa_ignore FREED_MEMORY */
3736 			chk = TAILQ_FIRST(&asoc->send_queue);
3737 		}
3738 	}
3739 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3740 		/* For each stream */
3741 		outs = &stcb->asoc.strmout[i];
3742 		/* clean up any sends there */
3743 		stcb->asoc.locked_on_sending = NULL;
3744 		sp = TAILQ_FIRST(&outs->outqueue);
3745 		while (sp) {
3746 			stcb->asoc.stream_queue_cnt--;
3747 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3748 			sctp_free_spbufspace(stcb, asoc, sp);
3749 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3750 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3751 			if (sp->data) {
3752 				sctp_m_freem(sp->data);
3753 				sp->data = NULL;
3754 			}
3755 			if (sp->net)
3756 				sctp_free_remote_addr(sp->net);
3757 			sp->net = NULL;
3758 			/* Free the chunk */
3759 			sctp_free_a_strmoq(stcb, sp);
3760 			/* sa_ignore FREED_MEMORY */
3761 			sp = TAILQ_FIRST(&outs->outqueue);
3762 		}
3763 	}
3764 
3765 	if (holds_lock == 0) {
3766 		SCTP_TCB_SEND_UNLOCK(stcb);
3767 	}
3768 }
3769 
3770 void
3771 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3772 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3773     SCTP_UNUSED
3774 #endif
3775 )
3776 {
3777 
3778 	if (stcb == NULL) {
3779 		return;
3780 	}
3781 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3782 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3783 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3784 		return;
3785 	}
3786 	/* Tell them we lost the asoc */
3787 	sctp_report_all_outbound(stcb, 1, so_locked);
3788 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3789 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3790 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3791 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3792 	}
3793 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3794 }
3795 
3796 void
3797 sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3798     struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
3799     uint32_t vrf_id, uint16_t port)
3800 {
3801 	uint32_t vtag;
3802 
3803 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3804 	struct socket *so;
3805 
3806 #endif
3807 
3808 	vtag = 0;
3809 	if (stcb != NULL) {
3810 		/* We have a TCB to abort, send notification too */
3811 		vtag = stcb->asoc.peer_vtag;
3812 		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
3813 		/* get the assoc vrf id and table id */
3814 		vrf_id = stcb->asoc.vrf_id;
3815 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3816 	}
3817 	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
3818 	if (stcb != NULL) {
3819 		/* Ok, now lets free it */
3820 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3821 		so = SCTP_INP_SO(inp);
3822 		atomic_add_int(&stcb->asoc.refcnt, 1);
3823 		SCTP_TCB_UNLOCK(stcb);
3824 		SCTP_SOCKET_LOCK(so, 1);
3825 		SCTP_TCB_LOCK(stcb);
3826 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3827 #endif
3828 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
3829 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3830 		SCTP_SOCKET_UNLOCK(so, 1);
3831 #endif
3832 	} else {
3833 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3834 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3835 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3836 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
3837 			}
3838 		}
3839 	}
3840 }
3841 
3842 #ifdef SCTP_ASOCLOG_OF_TSNS
3843 void
3844 sctp_print_out_track_log(struct sctp_tcb *stcb)
3845 {
3846 #ifdef NOSIY_PRINTS
3847 	int i;
3848 
3849 	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
3850 	SCTP_PRINTF("IN bound TSN log-aaa\n");
3851 	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
3852 		SCTP_PRINTF("None rcvd\n");
3853 		goto none_in;
3854 	}
3855 	if (stcb->asoc.tsn_in_wrapped) {
3856 		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
3857 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3858 			    stcb->asoc.in_tsnlog[i].tsn,
3859 			    stcb->asoc.in_tsnlog[i].strm,
3860 			    stcb->asoc.in_tsnlog[i].seq,
3861 			    stcb->asoc.in_tsnlog[i].flgs,
3862 			    stcb->asoc.in_tsnlog[i].sz);
3863 		}
3864 	}
3865 	if (stcb->asoc.tsn_in_at) {
3866 		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
3867 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3868 			    stcb->asoc.in_tsnlog[i].tsn,
3869 			    stcb->asoc.in_tsnlog[i].strm,
3870 			    stcb->asoc.in_tsnlog[i].seq,
3871 			    stcb->asoc.in_tsnlog[i].flgs,
3872 			    stcb->asoc.in_tsnlog[i].sz);
3873 		}
3874 	}
3875 none_in:
3876 	SCTP_PRINTF("OUT bound TSN log-aaa\n");
3877 	if ((stcb->asoc.tsn_out_at == 0) &&
3878 	    (stcb->asoc.tsn_out_wrapped == 0)) {
3879 		SCTP_PRINTF("None sent\n");
3880 	}
3881 	if (stcb->asoc.tsn_out_wrapped) {
3882 		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
3883 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3884 			    stcb->asoc.out_tsnlog[i].tsn,
3885 			    stcb->asoc.out_tsnlog[i].strm,
3886 			    stcb->asoc.out_tsnlog[i].seq,
3887 			    stcb->asoc.out_tsnlog[i].flgs,
3888 			    stcb->asoc.out_tsnlog[i].sz);
3889 		}
3890 	}
3891 	if (stcb->asoc.tsn_out_at) {
3892 		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
3893 			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
3894 			    stcb->asoc.out_tsnlog[i].tsn,
3895 			    stcb->asoc.out_tsnlog[i].strm,
3896 			    stcb->asoc.out_tsnlog[i].seq,
3897 			    stcb->asoc.out_tsnlog[i].flgs,
3898 			    stcb->asoc.out_tsnlog[i].sz);
3899 		}
3900 	}
3901 #endif
3902 }
3903 
3904 #endif
3905 
3906 void
3907 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3908     int error, struct mbuf *op_err,
3909     int so_locked
3910 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3911     SCTP_UNUSED
3912 #endif
3913 )
3914 {
3915 	uint32_t vtag;
3916 
3917 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3918 	struct socket *so;
3919 
3920 #endif
3921 
3922 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3923 	so = SCTP_INP_SO(inp);
3924 #endif
3925 	if (stcb == NULL) {
3926 		/* Got to have a TCB */
3927 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3928 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3929 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3930 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
3931 			}
3932 		}
3933 		return;
3934 	} else {
3935 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3936 	}
3937 	vtag = stcb->asoc.peer_vtag;
3938 	/* notify the ulp */
3939 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
3940 		sctp_abort_notification(stcb, error, so_locked);
3941 	/* notify the peer */
3942 #if defined(SCTP_PANIC_ON_ABORT)
3943 	panic("aborting an association");
3944 #endif
3945 	sctp_send_abort_tcb(stcb, op_err, so_locked);
3946 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3947 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3948 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3949 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3950 	}
3951 	/* now free the asoc */
3952 #ifdef SCTP_ASOCLOG_OF_TSNS
3953 	sctp_print_out_track_log(stcb);
3954 #endif
3955 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3956 	if (!so_locked) {
3957 		atomic_add_int(&stcb->asoc.refcnt, 1);
3958 		SCTP_TCB_UNLOCK(stcb);
3959 		SCTP_SOCKET_LOCK(so, 1);
3960 		SCTP_TCB_LOCK(stcb);
3961 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3962 	}
3963 #endif
3964 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
3965 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3966 	if (!so_locked) {
3967 		SCTP_SOCKET_UNLOCK(so, 1);
3968 	}
3969 #endif
3970 }
3971 
3972 void
3973 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3974     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3975 {
3976 	struct sctp_chunkhdr *ch, chunk_buf;
3977 	unsigned int chk_length;
3978 
3979 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3980 	/* Generate a TO address for future reference */
3981 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3982 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3983 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3984 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3985 		}
3986 	}
3987 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3988 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3989 	while (ch != NULL) {
3990 		chk_length = ntohs(ch->chunk_length);
3991 		if (chk_length < sizeof(*ch)) {
3992 			/* break to abort land */
3993 			break;
3994 		}
3995 		switch (ch->chunk_type) {
3996 		case SCTP_COOKIE_ECHO:
3997 			/* We hit here only if the assoc is being freed */
3998 			return;
3999 		case SCTP_PACKET_DROPPED:
4000 			/* we don't respond to pkt-dropped */
4001 			return;
4002 		case SCTP_ABORT_ASSOCIATION:
4003 			/* we don't respond with an ABORT to an ABORT */
4004 			return;
4005 		case SCTP_SHUTDOWN_COMPLETE:
4006 			/*
4007 			 * we ignore it since we are not waiting for it and
4008 			 * peer is gone
4009 			 */
4010 			return;
4011 		case SCTP_SHUTDOWN_ACK:
4012 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
4013 			return;
4014 		default:
4015 			break;
4016 		}
4017 		offset += SCTP_SIZE32(chk_length);
4018 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4019 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4020 	}
4021 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
4022 }
4023 
4024 /*
4025  * check the inbound datagram to make sure there is not an abort inside it,
4026  * if there is return 1, else return 0.
4027  */
4028 int
4029 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4030 {
4031 	struct sctp_chunkhdr *ch;
4032 	struct sctp_init_chunk *init_chk, chunk_buf;
4033 	int offset;
4034 	unsigned int chk_length;
4035 
4036 	offset = iphlen + sizeof(struct sctphdr);
4037 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4038 	    (uint8_t *) & chunk_buf);
4039 	while (ch != NULL) {
4040 		chk_length = ntohs(ch->chunk_length);
4041 		if (chk_length < sizeof(*ch)) {
4042 			/* packet is probably corrupt */
4043 			break;
4044 		}
4045 		/* we seem to be ok, is it an abort? */
4046 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4047 			/* yep, tell them */
4048 			return (1);
4049 		}
4050 		if (ch->chunk_type == SCTP_INITIATION) {
4051 			/* need to update the Vtag */
4052 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4053 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4054 			if (init_chk != NULL) {
4055 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4056 			}
4057 		}
4058 		/* Nope, move to the next chunk */
4059 		offset += SCTP_SIZE32(chk_length);
4060 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4061 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4062 	}
4063 	return (0);
4064 }
4065 
4066 /*
4067  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4068  * set (i.e. it's 0) so, create this function to compare link local scopes
4069  */
4070 #ifdef INET6
4071 uint32_t
4072 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4073 {
4074 	struct sockaddr_in6 a, b;
4075 
4076 	/* save copies */
4077 	a = *addr1;
4078 	b = *addr2;
4079 
4080 	if (a.sin6_scope_id == 0)
4081 		if (sa6_recoverscope(&a)) {
4082 			/* can't get scope, so can't match */
4083 			return (0);
4084 		}
4085 	if (b.sin6_scope_id == 0)
4086 		if (sa6_recoverscope(&b)) {
4087 			/* can't get scope, so can't match */
4088 			return (0);
4089 		}
4090 	if (a.sin6_scope_id != b.sin6_scope_id)
4091 		return (0);
4092 
4093 	return (1);
4094 }
4095 
4096 /*
4097  * returns a sockaddr_in6 with embedded scope recovered and removed
4098  */
4099 struct sockaddr_in6 *
4100 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4101 {
4102 	/* check and strip embedded scope junk */
4103 	if (addr->sin6_family == AF_INET6) {
4104 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4105 			if (addr->sin6_scope_id == 0) {
4106 				*store = *addr;
4107 				if (!sa6_recoverscope(store)) {
4108 					/* use the recovered scope */
4109 					addr = store;
4110 				}
4111 			} else {
4112 				/* else, return the original "to" addr */
4113 				in6_clearscope(&addr->sin6_addr);
4114 			}
4115 		}
4116 	}
4117 	return (addr);
4118 }
4119 
4120 #endif
4121 
4122 /*
4123  * are the two addresses the same?  currently a "scopeless" check returns: 1
4124  * if same, 0 if not
4125  */
4126 int
4127 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4128 {
4129 
4130 	/* must be valid */
4131 	if (sa1 == NULL || sa2 == NULL)
4132 		return (0);
4133 
4134 	/* must be the same family */
4135 	if (sa1->sa_family != sa2->sa_family)
4136 		return (0);
4137 
4138 	switch (sa1->sa_family) {
4139 #ifdef INET6
4140 	case AF_INET6:
4141 		{
4142 			/* IPv6 addresses */
4143 			struct sockaddr_in6 *sin6_1, *sin6_2;
4144 
4145 			sin6_1 = (struct sockaddr_in6 *)sa1;
4146 			sin6_2 = (struct sockaddr_in6 *)sa2;
4147 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4148 			    sin6_2));
4149 		}
4150 #endif
4151 	case AF_INET:
4152 		{
4153 			/* IPv4 addresses */
4154 			struct sockaddr_in *sin_1, *sin_2;
4155 
4156 			sin_1 = (struct sockaddr_in *)sa1;
4157 			sin_2 = (struct sockaddr_in *)sa2;
4158 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4159 		}
4160 	default:
4161 		/* we don't do these... */
4162 		return (0);
4163 	}
4164 }
4165 
4166 void
4167 sctp_print_address(struct sockaddr *sa)
4168 {
4169 #ifdef INET6
4170 	char ip6buf[INET6_ADDRSTRLEN];
4171 
4172 	ip6buf[0] = 0;
4173 #endif
4174 
4175 	switch (sa->sa_family) {
4176 #ifdef INET6
4177 	case AF_INET6:
4178 		{
4179 			struct sockaddr_in6 *sin6;
4180 
4181 			sin6 = (struct sockaddr_in6 *)sa;
4182 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4183 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4184 			    ntohs(sin6->sin6_port),
4185 			    sin6->sin6_scope_id);
4186 			break;
4187 		}
4188 #endif
4189 	case AF_INET:
4190 		{
4191 			struct sockaddr_in *sin;
4192 			unsigned char *p;
4193 
4194 			sin = (struct sockaddr_in *)sa;
4195 			p = (unsigned char *)&sin->sin_addr;
4196 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4197 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4198 			break;
4199 		}
4200 	default:
4201 		SCTP_PRINTF("?\n");
4202 		break;
4203 	}
4204 }
4205 
4206 void
4207 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4208 {
4209 	switch (iph->ip_v) {
4210 		case IPVERSION:
4211 		{
4212 			struct sockaddr_in lsa, fsa;
4213 
4214 			bzero(&lsa, sizeof(lsa));
4215 			lsa.sin_len = sizeof(lsa);
4216 			lsa.sin_family = AF_INET;
4217 			lsa.sin_addr = iph->ip_src;
4218 			lsa.sin_port = sh->src_port;
4219 			bzero(&fsa, sizeof(fsa));
4220 			fsa.sin_len = sizeof(fsa);
4221 			fsa.sin_family = AF_INET;
4222 			fsa.sin_addr = iph->ip_dst;
4223 			fsa.sin_port = sh->dest_port;
4224 			SCTP_PRINTF("src: ");
4225 			sctp_print_address((struct sockaddr *)&lsa);
4226 			SCTP_PRINTF("dest: ");
4227 			sctp_print_address((struct sockaddr *)&fsa);
4228 			break;
4229 		}
4230 #ifdef INET6
4231 	case IPV6_VERSION >> 4:
4232 		{
4233 			struct ip6_hdr *ip6;
4234 			struct sockaddr_in6 lsa6, fsa6;
4235 
4236 			ip6 = (struct ip6_hdr *)iph;
4237 			bzero(&lsa6, sizeof(lsa6));
4238 			lsa6.sin6_len = sizeof(lsa6);
4239 			lsa6.sin6_family = AF_INET6;
4240 			lsa6.sin6_addr = ip6->ip6_src;
4241 			lsa6.sin6_port = sh->src_port;
4242 			bzero(&fsa6, sizeof(fsa6));
4243 			fsa6.sin6_len = sizeof(fsa6);
4244 			fsa6.sin6_family = AF_INET6;
4245 			fsa6.sin6_addr = ip6->ip6_dst;
4246 			fsa6.sin6_port = sh->dest_port;
4247 			SCTP_PRINTF("src: ");
4248 			sctp_print_address((struct sockaddr *)&lsa6);
4249 			SCTP_PRINTF("dest: ");
4250 			sctp_print_address((struct sockaddr *)&fsa6);
4251 			break;
4252 		}
4253 #endif
4254 	default:
4255 		/* TSNH */
4256 		break;
4257 	}
4258 }
4259 
4260 void
4261 sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
4262     struct sctp_inpcb *new_inp,
4263     struct sctp_tcb *stcb,
4264     int waitflags)
4265 {
4266 	/*
4267 	 * go through our old INP and pull off any control structures that
4268 	 * belong to stcb and move then to the new inp.
4269 	 */
4270 	struct socket *old_so, *new_so;
4271 	struct sctp_queued_to_read *control, *nctl;
4272 	struct sctp_readhead tmp_queue;
4273 	struct mbuf *m;
4274 	int error = 0;
4275 
4276 	old_so = old_inp->sctp_socket;
4277 	new_so = new_inp->sctp_socket;
4278 	TAILQ_INIT(&tmp_queue);
4279 	error = sblock(&old_so->so_rcv, waitflags);
4280 	if (error) {
4281 		/*
4282 		 * Gak, can't get sblock, we have a problem. data will be
4283 		 * left stranded.. and we don't dare look at it since the
4284 		 * other thread may be reading something. Oh well, its a
4285 		 * screwed up app that does a peeloff OR a accept while
4286 		 * reading from the main socket... actually its only the
4287 		 * peeloff() case, since I think read will fail on a
4288 		 * listening socket..
4289 		 */
4290 		return;
4291 	}
4292 	/* lock the socket buffers */
4293 	SCTP_INP_READ_LOCK(old_inp);
4294 	control = TAILQ_FIRST(&old_inp->read_queue);
4295 	/* Pull off all for out target stcb */
4296 	while (control) {
4297 		nctl = TAILQ_NEXT(control, next);
4298 		if (control->stcb == stcb) {
4299 			/* remove it we want it */
4300 			TAILQ_REMOVE(&old_inp->read_queue, control, next);
4301 			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
4302 			m = control->data;
4303 			while (m) {
4304 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4305 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
4306 				}
4307 				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
4308 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4309 					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4310 				}
4311 				m = SCTP_BUF_NEXT(m);
4312 			}
4313 		}
4314 		control = nctl;
4315 	}
4316 	SCTP_INP_READ_UNLOCK(old_inp);
4317 	/* Remove the sb-lock on the old socket */
4318 
4319 	sbunlock(&old_so->so_rcv);
4320 	/* Now we move them over to the new socket buffer */
4321 	control = TAILQ_FIRST(&tmp_queue);
4322 	SCTP_INP_READ_LOCK(new_inp);
4323 	while (control) {
4324 		nctl = TAILQ_NEXT(control, next);
4325 		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
4326 		m = control->data;
4327 		while (m) {
4328 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4329 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4330 			}
4331 			sctp_sballoc(stcb, &new_so->so_rcv, m);
4332 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4333 				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4334 			}
4335 			m = SCTP_BUF_NEXT(m);
4336 		}
4337 		control = nctl;
4338 	}
4339 	SCTP_INP_READ_UNLOCK(new_inp);
4340 }
4341 
4342 void
4343 sctp_add_to_readq(struct sctp_inpcb *inp,
4344     struct sctp_tcb *stcb,
4345     struct sctp_queued_to_read *control,
4346     struct sockbuf *sb,
4347     int end,
4348     int inp_read_lock_held,
4349     int so_locked
4350 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4351     SCTP_UNUSED
4352 #endif
4353 )
4354 {
4355 	/*
4356 	 * Here we must place the control on the end of the socket read
4357 	 * queue AND increment sb_cc so that select will work properly on
4358 	 * read.
4359 	 */
4360 	struct mbuf *m, *prev = NULL;
4361 
4362 	if (inp == NULL) {
4363 		/* Gak, TSNH!! */
4364 #ifdef INVARIANTS
4365 		panic("Gak, inp NULL on add_to_readq");
4366 #endif
4367 		return;
4368 	}
4369 	if (inp_read_lock_held == 0)
4370 		SCTP_INP_READ_LOCK(inp);
4371 	if (!(control->spec_flags & M_NOTIFICATION)) {
4372 		atomic_add_int(&inp->total_recvs, 1);
4373 		if (!control->do_not_ref_stcb) {
4374 			atomic_add_int(&stcb->total_recvs, 1);
4375 		}
4376 	}
4377 	m = control->data;
4378 	control->held_length = 0;
4379 	control->length = 0;
4380 	while (m) {
4381 		if (SCTP_BUF_LEN(m) == 0) {
4382 			/* Skip mbufs with NO length */
4383 			if (prev == NULL) {
4384 				/* First one */
4385 				control->data = sctp_m_free(m);
4386 				m = control->data;
4387 			} else {
4388 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4389 				m = SCTP_BUF_NEXT(prev);
4390 			}
4391 			if (m == NULL) {
4392 				control->tail_mbuf = prev;
4393 			}
4394 			continue;
4395 		}
4396 		prev = m;
4397 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4398 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4399 		}
4400 		sctp_sballoc(stcb, sb, m);
4401 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4402 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4403 		}
4404 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4405 		m = SCTP_BUF_NEXT(m);
4406 	}
4407 	if (prev != NULL) {
4408 		control->tail_mbuf = prev;
4409 	} else {
4410 		/* Everything got collapsed out?? */
4411 		if (inp_read_lock_held == 0)
4412 			SCTP_INP_READ_UNLOCK(inp);
4413 		return;
4414 	}
4415 	if (end) {
4416 		control->end_added = 1;
4417 	}
4418 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4419 	if (inp_read_lock_held == 0)
4420 		SCTP_INP_READ_UNLOCK(inp);
4421 	if (inp && inp->sctp_socket) {
4422 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4423 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4424 		} else {
4425 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4426 			struct socket *so;
4427 
4428 			so = SCTP_INP_SO(inp);
4429 			if (!so_locked) {
4430 				atomic_add_int(&stcb->asoc.refcnt, 1);
4431 				SCTP_TCB_UNLOCK(stcb);
4432 				SCTP_SOCKET_LOCK(so, 1);
4433 				SCTP_TCB_LOCK(stcb);
4434 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4435 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4436 					SCTP_SOCKET_UNLOCK(so, 1);
4437 					return;
4438 				}
4439 			}
4440 #endif
4441 			sctp_sorwakeup(inp, inp->sctp_socket);
4442 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4443 			if (!so_locked) {
4444 				SCTP_SOCKET_UNLOCK(so, 1);
4445 			}
4446 #endif
4447 		}
4448 	}
4449 }
4450 
4451 
4452 int
4453 sctp_append_to_readq(struct sctp_inpcb *inp,
4454     struct sctp_tcb *stcb,
4455     struct sctp_queued_to_read *control,
4456     struct mbuf *m,
4457     int end,
4458     int ctls_cumack,
4459     struct sockbuf *sb)
4460 {
4461 	/*
4462 	 * A partial delivery API event is underway. OR we are appending on
4463 	 * the reassembly queue.
4464 	 *
4465 	 * If PDAPI this means we need to add m to the end of the data.
4466 	 * Increase the length in the control AND increment the sb_cc.
4467 	 * Otherwise sb is NULL and all we need to do is put it at the end
4468 	 * of the mbuf chain.
4469 	 */
4470 	int len = 0;
4471 	struct mbuf *mm, *tail = NULL, *prev = NULL;
4472 
4473 	if (inp) {
4474 		SCTP_INP_READ_LOCK(inp);
4475 	}
4476 	if (control == NULL) {
4477 get_out:
4478 		if (inp) {
4479 			SCTP_INP_READ_UNLOCK(inp);
4480 		}
4481 		return (-1);
4482 	}
4483 	if (control->end_added) {
4484 		/* huh this one is complete? */
4485 		goto get_out;
4486 	}
4487 	mm = m;
4488 	if (mm == NULL) {
4489 		goto get_out;
4490 	}
4491 	while (mm) {
4492 		if (SCTP_BUF_LEN(mm) == 0) {
4493 			/* Skip mbufs with NO lenght */
4494 			if (prev == NULL) {
4495 				/* First one */
4496 				m = sctp_m_free(mm);
4497 				mm = m;
4498 			} else {
4499 				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
4500 				mm = SCTP_BUF_NEXT(prev);
4501 			}
4502 			continue;
4503 		}
4504 		prev = mm;
4505 		len += SCTP_BUF_LEN(mm);
4506 		if (sb) {
4507 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4508 				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
4509 			}
4510 			sctp_sballoc(stcb, sb, mm);
4511 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4512 				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4513 			}
4514 		}
4515 		mm = SCTP_BUF_NEXT(mm);
4516 	}
4517 	if (prev) {
4518 		tail = prev;
4519 	} else {
4520 		/* Really there should always be a prev */
4521 		if (m == NULL) {
4522 			/* Huh nothing left? */
4523 #ifdef INVARIANTS
4524 			panic("Nothing left to add?");
4525 #else
4526 			goto get_out;
4527 #endif
4528 		}
4529 		tail = m;
4530 	}
4531 	if (control->tail_mbuf) {
4532 		/* append */
4533 		SCTP_BUF_NEXT(control->tail_mbuf) = m;
4534 		control->tail_mbuf = tail;
4535 	} else {
4536 		/* nothing there */
4537 #ifdef INVARIANTS
4538 		if (control->data != NULL) {
4539 			panic("This should NOT happen");
4540 		}
4541 #endif
4542 		control->data = m;
4543 		control->tail_mbuf = tail;
4544 	}
4545 	atomic_add_int(&control->length, len);
4546 	if (end) {
4547 		/* message is complete */
4548 		if (stcb && (control == stcb->asoc.control_pdapi)) {
4549 			stcb->asoc.control_pdapi = NULL;
4550 		}
4551 		control->held_length = 0;
4552 		control->end_added = 1;
4553 	}
4554 	if (stcb == NULL) {
4555 		control->do_not_ref_stcb = 1;
4556 	}
4557 	/*
4558 	 * When we are appending in partial delivery, the cum-ack is used
4559 	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
4560 	 * is populated in the outbound sinfo structure from the true cumack
4561 	 * if the association exists...
4562 	 */
4563 	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
4564 	if (inp) {
4565 		SCTP_INP_READ_UNLOCK(inp);
4566 	}
4567 	if (inp && inp->sctp_socket) {
4568 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4569 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4570 		} else {
4571 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4572 			struct socket *so;
4573 
4574 			so = SCTP_INP_SO(inp);
4575 			atomic_add_int(&stcb->asoc.refcnt, 1);
4576 			SCTP_TCB_UNLOCK(stcb);
4577 			SCTP_SOCKET_LOCK(so, 1);
4578 			SCTP_TCB_LOCK(stcb);
4579 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4580 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4581 				SCTP_SOCKET_UNLOCK(so, 1);
4582 				return (0);
4583 			}
4584 #endif
4585 			sctp_sorwakeup(inp, inp->sctp_socket);
4586 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4587 			SCTP_SOCKET_UNLOCK(so, 1);
4588 #endif
4589 		}
4590 	}
4591 	return (0);
4592 }
4593 
4594 
4595 
4596 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4597  *************ALTERNATE ROUTING CODE
4598  */
4599 
4600 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4601  *************ALTERNATE ROUTING CODE
4602  */
4603 
4604 struct mbuf *
4605 sctp_generate_invmanparam(int err)
4606 {
4607 	/* Return a MBUF with a invalid mandatory parameter */
4608 	struct mbuf *m;
4609 
4610 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4611 	if (m) {
4612 		struct sctp_paramhdr *ph;
4613 
4614 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4615 		ph = mtod(m, struct sctp_paramhdr *);
4616 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4617 		ph->param_type = htons(err);
4618 	}
4619 	return (m);
4620 }
4621 
4622 #ifdef SCTP_MBCNT_LOGGING
4623 void
4624 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4625     struct sctp_tmit_chunk *tp1, int chk_cnt)
4626 {
4627 	if (tp1->data == NULL) {
4628 		return;
4629 	}
4630 	asoc->chunks_on_out_queue -= chk_cnt;
4631 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4632 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4633 		    asoc->total_output_queue_size,
4634 		    tp1->book_size,
4635 		    0,
4636 		    tp1->mbcnt);
4637 	}
4638 	if (asoc->total_output_queue_size >= tp1->book_size) {
4639 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4640 	} else {
4641 		asoc->total_output_queue_size = 0;
4642 	}
4643 
4644 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4645 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4646 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4647 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4648 		} else {
4649 			stcb->sctp_socket->so_snd.sb_cc = 0;
4650 
4651 		}
4652 	}
4653 }
4654 
4655 #endif
4656 
4657 int
4658 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4659     int reason, int so_locked
4660 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4661     SCTP_UNUSED
4662 #endif
4663 )
4664 {
4665 	struct sctp_stream_out *strq;
4666 	struct sctp_tmit_chunk *chk = NULL;
4667 	struct sctp_stream_queue_pending *sp;
4668 	uint16_t stream = 0, seq = 0;
4669 	uint8_t foundeom = 0;
4670 	int ret_sz = 0;
4671 	int notdone;
4672 	int do_wakeup_routine = 0;
4673 
4674 	stream = tp1->rec.data.stream_number;
4675 	seq = tp1->rec.data.stream_seq;
4676 	do {
4677 		ret_sz += tp1->book_size;
4678 		if (tp1->data != NULL) {
4679 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4680 				sctp_flight_size_decrease(tp1);
4681 				sctp_total_flight_decrease(stcb, tp1);
4682 			}
4683 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4684 			stcb->asoc.peers_rwnd += tp1->send_size;
4685 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4686 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4687 			if (tp1->data) {
4688 				sctp_m_freem(tp1->data);
4689 				tp1->data = NULL;
4690 			}
4691 			do_wakeup_routine = 1;
4692 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4693 				stcb->asoc.sent_queue_cnt_removeable--;
4694 			}
4695 		}
4696 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4697 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4698 		    SCTP_DATA_NOT_FRAG) {
4699 			/* not frag'ed we ae done   */
4700 			notdone = 0;
4701 			foundeom = 1;
4702 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4703 			/* end of frag, we are done */
4704 			notdone = 0;
4705 			foundeom = 1;
4706 		} else {
4707 			/*
4708 			 * Its a begin or middle piece, we must mark all of
4709 			 * it
4710 			 */
4711 			notdone = 1;
4712 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4713 		}
4714 	} while (tp1 && notdone);
4715 	if (foundeom == 0) {
4716 		/*
4717 		 * The multi-part message was scattered across the send and
4718 		 * sent queue.
4719 		 */
4720 next_on_sent:
4721 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4722 		/*
4723 		 * recurse throught the send_queue too, starting at the
4724 		 * beginning.
4725 		 */
4726 		if ((tp1) &&
4727 		    (tp1->rec.data.stream_number == stream) &&
4728 		    (tp1->rec.data.stream_seq == seq)) {
4729 			/*
4730 			 * save to chk in case we have some on stream out
4731 			 * queue. If so and we have an un-transmitted one we
4732 			 * don't have to fudge the TSN.
4733 			 */
4734 			chk = tp1;
4735 			ret_sz += tp1->book_size;
4736 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4737 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4738 			if (tp1->data) {
4739 				sctp_m_freem(tp1->data);
4740 				tp1->data = NULL;
4741 			}
4742 			/* No flight involved here book the size to 0 */
4743 			tp1->book_size = 0;
4744 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4745 				foundeom = 1;
4746 			}
4747 			do_wakeup_routine = 1;
4748 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4749 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4750 			/*
4751 			 * on to the sent queue so we can wait for it to be
4752 			 * passed by.
4753 			 */
4754 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4755 			    sctp_next);
4756 			stcb->asoc.send_queue_cnt--;
4757 			stcb->asoc.sent_queue_cnt++;
4758 			goto next_on_sent;
4759 		}
4760 	}
4761 	if (foundeom == 0) {
4762 		/*
4763 		 * Still no eom found. That means there is stuff left on the
4764 		 * stream out queue.. yuck.
4765 		 */
4766 		strq = &stcb->asoc.strmout[stream];
4767 		SCTP_TCB_SEND_LOCK(stcb);
4768 		sp = TAILQ_FIRST(&strq->outqueue);
4769 		while (sp->strseq <= seq) {
4770 			/* Check if its our SEQ */
4771 			if (sp->strseq == seq) {
4772 				sp->discard_rest = 1;
4773 				/*
4774 				 * We may need to put a chunk on the queue
4775 				 * that holds the TSN that would have been
4776 				 * sent with the LAST bit.
4777 				 */
4778 				if (chk == NULL) {
4779 					/* Yep, we have to */
4780 					sctp_alloc_a_chunk(stcb, chk);
4781 					if (chk == NULL) {
4782 						/*
4783 						 * we are hosed. All we can
4784 						 * do is nothing.. which
4785 						 * will cause an abort if
4786 						 * the peer is paying
4787 						 * attention.
4788 						 */
4789 						goto oh_well;
4790 					}
4791 					memset(chk, 0, sizeof(*chk));
4792 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4793 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4794 					chk->asoc = &stcb->asoc;
4795 					chk->rec.data.stream_seq = sp->strseq;
4796 					chk->rec.data.stream_number = sp->stream;
4797 					chk->rec.data.payloadtype = sp->ppid;
4798 					chk->rec.data.context = sp->context;
4799 					chk->flags = sp->act_flags;
4800 					chk->whoTo = sp->net;
4801 					atomic_add_int(&chk->whoTo->ref_count, 1);
4802 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4803 					stcb->asoc.pr_sctp_cnt++;
4804 					chk->pr_sctp_on = 1;
4805 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4806 					stcb->asoc.sent_queue_cnt++;
4807 					stcb->asoc.pr_sctp_cnt++;
4808 				} else {
4809 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4810 				}
4811 		oh_well:
4812 				if (sp->data) {
4813 					/*
4814 					 * Pull any data to free up the SB
4815 					 * and allow sender to "add more"
4816 					 * whilc we will throw away :-)
4817 					 */
4818 					sctp_free_spbufspace(stcb, &stcb->asoc,
4819 					    sp);
4820 					ret_sz += sp->length;
4821 					do_wakeup_routine = 1;
4822 					sp->some_taken = 1;
4823 					sctp_m_freem(sp->data);
4824 					sp->length = 0;
4825 					sp->data = NULL;
4826 					sp->tail_mbuf = NULL;
4827 				}
4828 				break;
4829 			} else {
4830 				/* Next one please */
4831 				sp = TAILQ_NEXT(sp, next);
4832 			}
4833 		}		/* End while */
4834 		SCTP_TCB_SEND_UNLOCK(stcb);
4835 	}
4836 	if (do_wakeup_routine) {
4837 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4838 		struct socket *so;
4839 
4840 		so = SCTP_INP_SO(stcb->sctp_ep);
4841 		if (!so_locked) {
4842 			atomic_add_int(&stcb->asoc.refcnt, 1);
4843 			SCTP_TCB_UNLOCK(stcb);
4844 			SCTP_SOCKET_LOCK(so, 1);
4845 			SCTP_TCB_LOCK(stcb);
4846 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4847 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4848 				/* assoc was freed while we were unlocked */
4849 				SCTP_SOCKET_UNLOCK(so, 1);
4850 				return (ret_sz);
4851 			}
4852 		}
4853 #endif
4854 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4855 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4856 		if (!so_locked) {
4857 			SCTP_SOCKET_UNLOCK(so, 1);
4858 		}
4859 #endif
4860 	}
4861 	return (ret_sz);
4862 }
4863 
4864 /*
4865  * checks to see if the given address, sa, is one that is currently known by
4866  * the kernel note: can't distinguish the same address on multiple interfaces
4867  * and doesn't handle multiple addresses with different zone/scope id's note:
4868  * ifa_ifwithaddr() compares the entire sockaddr struct
4869  */
4870 struct sctp_ifa *
4871 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4872     int holds_lock)
4873 {
4874 	struct sctp_laddr *laddr;
4875 
4876 	if (holds_lock == 0) {
4877 		SCTP_INP_RLOCK(inp);
4878 	}
4879 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4880 		if (laddr->ifa == NULL)
4881 			continue;
4882 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4883 			continue;
4884 		if (addr->sa_family == AF_INET) {
4885 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4886 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4887 				/* found him. */
4888 				if (holds_lock == 0) {
4889 					SCTP_INP_RUNLOCK(inp);
4890 				}
4891 				return (laddr->ifa);
4892 				break;
4893 			}
4894 		}
4895 #ifdef INET6
4896 		if (addr->sa_family == AF_INET6) {
4897 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4898 			    &laddr->ifa->address.sin6)) {
4899 				/* found him. */
4900 				if (holds_lock == 0) {
4901 					SCTP_INP_RUNLOCK(inp);
4902 				}
4903 				return (laddr->ifa);
4904 				break;
4905 			}
4906 		}
4907 #endif
4908 	}
4909 	if (holds_lock == 0) {
4910 		SCTP_INP_RUNLOCK(inp);
4911 	}
4912 	return (NULL);
4913 }
4914 
4915 uint32_t
4916 sctp_get_ifa_hash_val(struct sockaddr *addr)
4917 {
4918 	if (addr->sa_family == AF_INET) {
4919 		struct sockaddr_in *sin;
4920 
4921 		sin = (struct sockaddr_in *)addr;
4922 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4923 	} else if (addr->sa_family == AF_INET6) {
4924 		struct sockaddr_in6 *sin6;
4925 		uint32_t hash_of_addr;
4926 
4927 		sin6 = (struct sockaddr_in6 *)addr;
4928 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4929 		    sin6->sin6_addr.s6_addr32[1] +
4930 		    sin6->sin6_addr.s6_addr32[2] +
4931 		    sin6->sin6_addr.s6_addr32[3]);
4932 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4933 		return (hash_of_addr);
4934 	}
4935 	return (0);
4936 }
4937 
4938 struct sctp_ifa *
4939 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4940 {
4941 	struct sctp_ifa *sctp_ifap;
4942 	struct sctp_vrf *vrf;
4943 	struct sctp_ifalist *hash_head;
4944 	uint32_t hash_of_addr;
4945 
4946 	if (holds_lock == 0)
4947 		SCTP_IPI_ADDR_RLOCK();
4948 
4949 	vrf = sctp_find_vrf(vrf_id);
4950 	if (vrf == NULL) {
4951 stage_right:
4952 		if (holds_lock == 0)
4953 			SCTP_IPI_ADDR_RUNLOCK();
4954 		return (NULL);
4955 	}
4956 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4957 
4958 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4959 	if (hash_head == NULL) {
4960 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4961 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4962 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4963 		sctp_print_address(addr);
4964 		SCTP_PRINTF("No such bucket for address\n");
4965 		if (holds_lock == 0)
4966 			SCTP_IPI_ADDR_RUNLOCK();
4967 
4968 		return (NULL);
4969 	}
4970 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4971 		if (sctp_ifap == NULL) {
4972 #ifdef INVARIANTS
4973 			panic("Huh LIST_FOREACH corrupt");
4974 			goto stage_right;
4975 #else
4976 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4977 			goto stage_right;
4978 #endif
4979 		}
4980 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4981 			continue;
4982 		if (addr->sa_family == AF_INET) {
4983 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4984 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4985 				/* found him. */
4986 				if (holds_lock == 0)
4987 					SCTP_IPI_ADDR_RUNLOCK();
4988 				return (sctp_ifap);
4989 				break;
4990 			}
4991 		}
4992 #ifdef INET6
4993 		if (addr->sa_family == AF_INET6) {
4994 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4995 			    &sctp_ifap->address.sin6)) {
4996 				/* found him. */
4997 				if (holds_lock == 0)
4998 					SCTP_IPI_ADDR_RUNLOCK();
4999 				return (sctp_ifap);
5000 				break;
5001 			}
5002 		}
5003 #endif
5004 	}
5005 	if (holds_lock == 0)
5006 		SCTP_IPI_ADDR_RUNLOCK();
5007 	return (NULL);
5008 }
5009 
5010 static void
5011 sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
5012     uint32_t rwnd_req)
5013 {
5014 	/* User pulled some data, do we need a rwnd update? */
5015 	int r_unlocked = 0;
5016 	uint32_t dif, rwnd;
5017 	struct socket *so = NULL;
5018 
5019 	if (stcb == NULL)
5020 		return;
5021 
5022 	atomic_add_int(&stcb->asoc.refcnt, 1);
5023 
5024 	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
5025 	    SCTP_STATE_SHUTDOWN_RECEIVED |
5026 	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
5027 		/* Pre-check If we are freeing no update */
5028 		goto no_lock;
5029 	}
5030 	SCTP_INP_INCR_REF(stcb->sctp_ep);
5031 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5032 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5033 		goto out;
5034 	}
5035 	so = stcb->sctp_socket;
5036 	if (so == NULL) {
5037 		goto out;
5038 	}
5039 	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
5040 	/* Have you have freed enough to look */
5041 	*freed_so_far = 0;
5042 	/* Yep, its worth a look and the lock overhead */
5043 
5044 	/* Figure out what the rwnd would be */
5045 	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
5046 	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
5047 		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
5048 	} else {
5049 		dif = 0;
5050 	}
5051 	if (dif >= rwnd_req) {
5052 		if (hold_rlock) {
5053 			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5054 			r_unlocked = 1;
5055 		}
5056 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5057 			/*
5058 			 * One last check before we allow the guy possibly
5059 			 * to get in. There is a race, where the guy has not
5060 			 * reached the gate. In that case
5061 			 */
5062 			goto out;
5063 		}
5064 		SCTP_TCB_LOCK(stcb);
5065 		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5066 			/* No reports here */
5067 			SCTP_TCB_UNLOCK(stcb);
5068 			goto out;
5069 		}
5070 		SCTP_STAT_INCR(sctps_wu_sacks_sent);
5071 		sctp_send_sack(stcb);
5072 
5073 		sctp_chunk_output(stcb->sctp_ep, stcb,
5074 		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
5075 		/* make sure no timer is running */
5076 		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
5077 		SCTP_TCB_UNLOCK(stcb);
5078 	} else {
5079 		/* Update how much we have pending */
5080 		stcb->freed_by_sorcv_sincelast = dif;
5081 	}
5082 out:
5083 	if (so && r_unlocked && hold_rlock) {
5084 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5085 	}
5086 	SCTP_INP_DECR_REF(stcb->sctp_ep);
5087 no_lock:
5088 	atomic_add_int(&stcb->asoc.refcnt, -1);
5089 	return;
5090 }
5091 
5092 int
5093 sctp_sorecvmsg(struct socket *so,
5094     struct uio *uio,
5095     struct mbuf **mp,
5096     struct sockaddr *from,
5097     int fromlen,
5098     int *msg_flags,
5099     struct sctp_sndrcvinfo *sinfo,
5100     int filling_sinfo)
5101 {
5102 	/*
5103 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5104 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5105 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5106 	 * On the way out we may send out any combination of:
5107 	 * MSG_NOTIFICATION MSG_EOR
5108 	 *
5109 	 */
5110 	struct sctp_inpcb *inp = NULL;
5111 	int my_len = 0;
5112 	int cp_len = 0, error = 0;
5113 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5114 	struct mbuf *m = NULL, *embuf = NULL;
5115 	struct sctp_tcb *stcb = NULL;
5116 	int wakeup_read_socket = 0;
5117 	int freecnt_applied = 0;
5118 	int out_flags = 0, in_flags = 0;
5119 	int block_allowed = 1;
5120 	uint32_t freed_so_far = 0;
5121 	uint32_t copied_so_far = 0;
5122 	int in_eeor_mode = 0;
5123 	int no_rcv_needed = 0;
5124 	uint32_t rwnd_req = 0;
5125 	int hold_sblock = 0;
5126 	int hold_rlock = 0;
5127 	int slen = 0;
5128 	uint32_t held_length = 0;
5129 	int sockbuf_lock = 0;
5130 
5131 	if (uio == NULL) {
5132 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5133 		return (EINVAL);
5134 	}
5135 	if (msg_flags) {
5136 		in_flags = *msg_flags;
5137 		if (in_flags & MSG_PEEK)
5138 			SCTP_STAT_INCR(sctps_read_peeks);
5139 	} else {
5140 		in_flags = 0;
5141 	}
5142 	slen = uio->uio_resid;
5143 
5144 	/* Pull in and set up our int flags */
5145 	if (in_flags & MSG_OOB) {
5146 		/* Out of band's NOT supported */
5147 		return (EOPNOTSUPP);
5148 	}
5149 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5150 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5151 		return (EINVAL);
5152 	}
5153 	if ((in_flags & (MSG_DONTWAIT
5154 	    | MSG_NBIO
5155 	    )) ||
5156 	    SCTP_SO_IS_NBIO(so)) {
5157 		block_allowed = 0;
5158 	}
5159 	/* setup the endpoint */
5160 	inp = (struct sctp_inpcb *)so->so_pcb;
5161 	if (inp == NULL) {
5162 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5163 		return (EFAULT);
5164 	}
5165 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5166 	/* Must be at least a MTU's worth */
5167 	if (rwnd_req < SCTP_MIN_RWND)
5168 		rwnd_req = SCTP_MIN_RWND;
5169 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5170 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5171 		sctp_misc_ints(SCTP_SORECV_ENTER,
5172 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5173 	}
5174 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5175 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5176 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5177 	}
5178 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5179 	sockbuf_lock = 1;
5180 	if (error) {
5181 		goto release_unlocked;
5182 	}
5183 restart:
5184 
5185 
5186 restart_nosblocks:
5187 	if (hold_sblock == 0) {
5188 		SOCKBUF_LOCK(&so->so_rcv);
5189 		hold_sblock = 1;
5190 	}
5191 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5192 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5193 		goto out;
5194 	}
5195 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5196 		if (so->so_error) {
5197 			error = so->so_error;
5198 			if ((in_flags & MSG_PEEK) == 0)
5199 				so->so_error = 0;
5200 			goto out;
5201 		} else {
5202 			if (so->so_rcv.sb_cc == 0) {
5203 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5204 				/* indicate EOF */
5205 				error = 0;
5206 				goto out;
5207 			}
5208 		}
5209 	}
5210 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5211 		/* we need to wait for data */
5212 		if ((so->so_rcv.sb_cc == 0) &&
5213 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5214 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5215 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5216 				/*
5217 				 * For active open side clear flags for
5218 				 * re-use passive open is blocked by
5219 				 * connect.
5220 				 */
5221 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5222 					/*
5223 					 * You were aborted, passive side
5224 					 * always hits here
5225 					 */
5226 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5227 					error = ECONNRESET;
5228 					/*
5229 					 * You get this once if you are
5230 					 * active open side
5231 					 */
5232 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5233 						/*
5234 						 * Remove flag if on the
5235 						 * active open side
5236 						 */
5237 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5238 					}
5239 				}
5240 				so->so_state &= ~(SS_ISCONNECTING |
5241 				    SS_ISDISCONNECTING |
5242 				    SS_ISCONFIRMING |
5243 				    SS_ISCONNECTED);
5244 				if (error == 0) {
5245 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5246 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5247 						error = ENOTCONN;
5248 					} else {
5249 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5250 					}
5251 				}
5252 				goto out;
5253 			}
5254 		}
5255 		error = sbwait(&so->so_rcv);
5256 		if (error) {
5257 			goto out;
5258 		}
5259 		held_length = 0;
5260 		goto restart_nosblocks;
5261 	} else if (so->so_rcv.sb_cc == 0) {
5262 		if (so->so_error) {
5263 			error = so->so_error;
5264 			if ((in_flags & MSG_PEEK) == 0)
5265 				so->so_error = 0;
5266 		} else {
5267 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5268 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5269 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5270 					/*
5271 					 * For active open side clear flags
5272 					 * for re-use passive open is
5273 					 * blocked by connect.
5274 					 */
5275 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5276 						/*
5277 						 * You were aborted, passive
5278 						 * side always hits here
5279 						 */
5280 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5281 						error = ECONNRESET;
5282 						/*
5283 						 * You get this once if you
5284 						 * are active open side
5285 						 */
5286 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5287 							/*
5288 							 * Remove flag if on
5289 							 * the active open
5290 							 * side
5291 							 */
5292 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5293 						}
5294 					}
5295 					so->so_state &= ~(SS_ISCONNECTING |
5296 					    SS_ISDISCONNECTING |
5297 					    SS_ISCONFIRMING |
5298 					    SS_ISCONNECTED);
5299 					if (error == 0) {
5300 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5301 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5302 							error = ENOTCONN;
5303 						} else {
5304 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5305 						}
5306 					}
5307 					goto out;
5308 				}
5309 			}
5310 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5311 			error = EWOULDBLOCK;
5312 		}
5313 		goto out;
5314 	}
5315 	if (hold_sblock == 1) {
5316 		SOCKBUF_UNLOCK(&so->so_rcv);
5317 		hold_sblock = 0;
5318 	}
5319 	/* we possibly have data we can read */
5320 	/* sa_ignore FREED_MEMORY */
5321 	control = TAILQ_FIRST(&inp->read_queue);
5322 	if (control == NULL) {
5323 		/*
5324 		 * This could be happening since the appender did the
5325 		 * increment but as not yet did the tailq insert onto the
5326 		 * read_queue
5327 		 */
5328 		if (hold_rlock == 0) {
5329 			SCTP_INP_READ_LOCK(inp);
5330 			hold_rlock = 1;
5331 		}
5332 		control = TAILQ_FIRST(&inp->read_queue);
5333 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5334 #ifdef INVARIANTS
5335 			panic("Huh, its non zero and nothing on control?");
5336 #endif
5337 			so->so_rcv.sb_cc = 0;
5338 		}
5339 		SCTP_INP_READ_UNLOCK(inp);
5340 		hold_rlock = 0;
5341 		goto restart;
5342 	}
5343 	if ((control->length == 0) &&
5344 	    (control->do_not_ref_stcb)) {
5345 		/*
5346 		 * Clean up code for freeing assoc that left behind a
5347 		 * pdapi.. maybe a peer in EEOR that just closed after
5348 		 * sending and never indicated a EOR.
5349 		 */
5350 		if (hold_rlock == 0) {
5351 			hold_rlock = 1;
5352 			SCTP_INP_READ_LOCK(inp);
5353 		}
5354 		control->held_length = 0;
5355 		if (control->data) {
5356 			/* Hmm there is data here .. fix */
5357 			struct mbuf *m_tmp;
5358 			int cnt = 0;
5359 
5360 			m_tmp = control->data;
5361 			while (m_tmp) {
5362 				cnt += SCTP_BUF_LEN(m_tmp);
5363 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5364 					control->tail_mbuf = m_tmp;
5365 					control->end_added = 1;
5366 				}
5367 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5368 			}
5369 			control->length = cnt;
5370 		} else {
5371 			/* remove it */
5372 			TAILQ_REMOVE(&inp->read_queue, control, next);
5373 			/* Add back any hiddend data */
5374 			sctp_free_remote_addr(control->whoFrom);
5375 			sctp_free_a_readq(stcb, control);
5376 		}
5377 		if (hold_rlock) {
5378 			hold_rlock = 0;
5379 			SCTP_INP_READ_UNLOCK(inp);
5380 		}
5381 		goto restart;
5382 	}
5383 	if ((control->length == 0) &&
5384 	    (control->end_added == 1)) {
5385 		/*
5386 		 * Do we also need to check for (control->pdapi_aborted ==
5387 		 * 1)?
5388 		 */
5389 		if (hold_rlock == 0) {
5390 			hold_rlock = 1;
5391 			SCTP_INP_READ_LOCK(inp);
5392 		}
5393 		TAILQ_REMOVE(&inp->read_queue, control, next);
5394 		if (control->data) {
5395 #ifdef INVARIANTS
5396 			panic("control->data not null but control->length == 0");
5397 #else
5398 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5399 			sctp_m_freem(control->data);
5400 			control->data = NULL;
5401 #endif
5402 		}
5403 		if (control->aux_data) {
5404 			sctp_m_free(control->aux_data);
5405 			control->aux_data = NULL;
5406 		}
5407 		sctp_free_remote_addr(control->whoFrom);
5408 		sctp_free_a_readq(stcb, control);
5409 		if (hold_rlock) {
5410 			hold_rlock = 0;
5411 			SCTP_INP_READ_UNLOCK(inp);
5412 		}
5413 		goto restart;
5414 	}
5415 	if (control->length == 0) {
5416 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5417 		    (filling_sinfo)) {
5418 			/* find a more suitable one then this */
5419 			ctl = TAILQ_NEXT(control, next);
5420 			while (ctl) {
5421 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5422 				    (ctl->some_taken ||
5423 				    (ctl->spec_flags & M_NOTIFICATION) ||
5424 				    ((ctl->do_not_ref_stcb == 0) &&
5425 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5426 				    ) {
5427 					/*-
5428 					 * If we have a different TCB next, and there is data
5429 					 * present. If we have already taken some (pdapi), OR we can
5430 					 * ref the tcb and no delivery as started on this stream, we
5431 					 * take it. Note we allow a notification on a different
5432 					 * assoc to be delivered..
5433 					 */
5434 					control = ctl;
5435 					goto found_one;
5436 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5437 					    (ctl->length) &&
5438 					    ((ctl->some_taken) ||
5439 					    ((ctl->do_not_ref_stcb == 0) &&
5440 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5441 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5442 					/*-
5443 					 * If we have the same tcb, and there is data present, and we
5444 					 * have the strm interleave feature present. Then if we have
5445 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5446 					 * not started a delivery for this stream, we can take it.
5447 					 * Note we do NOT allow a notificaiton on the same assoc to
5448 					 * be delivered.
5449 					 */
5450 					control = ctl;
5451 					goto found_one;
5452 				}
5453 				ctl = TAILQ_NEXT(ctl, next);
5454 			}
5455 		}
5456 		/*
5457 		 * if we reach here, not suitable replacement is available
5458 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5459 		 * into the our held count, and its time to sleep again.
5460 		 */
5461 		held_length = so->so_rcv.sb_cc;
5462 		control->held_length = so->so_rcv.sb_cc;
5463 		goto restart;
5464 	}
5465 	/* Clear the held length since there is something to read */
5466 	control->held_length = 0;
5467 	if (hold_rlock) {
5468 		SCTP_INP_READ_UNLOCK(inp);
5469 		hold_rlock = 0;
5470 	}
5471 found_one:
5472 	/*
5473 	 * If we reach here, control has a some data for us to read off.
5474 	 * Note that stcb COULD be NULL.
5475 	 */
5476 	control->some_taken++;
5477 	if (hold_sblock) {
5478 		SOCKBUF_UNLOCK(&so->so_rcv);
5479 		hold_sblock = 0;
5480 	}
5481 	stcb = control->stcb;
5482 	if (stcb) {
5483 		if ((control->do_not_ref_stcb == 0) &&
5484 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5485 			if (freecnt_applied == 0)
5486 				stcb = NULL;
5487 		} else if (control->do_not_ref_stcb == 0) {
5488 			/* you can't free it on me please */
5489 			/*
5490 			 * The lock on the socket buffer protects us so the
5491 			 * free code will stop. But since we used the
5492 			 * socketbuf lock and the sender uses the tcb_lock
5493 			 * to increment, we need to use the atomic add to
5494 			 * the refcnt
5495 			 */
5496 			if (freecnt_applied) {
5497 #ifdef INVARIANTS
5498 				panic("refcnt already incremented");
5499 #else
5500 				printf("refcnt already incremented?\n");
5501 #endif
5502 			} else {
5503 				atomic_add_int(&stcb->asoc.refcnt, 1);
5504 				freecnt_applied = 1;
5505 			}
5506 			/*
5507 			 * Setup to remember how much we have not yet told
5508 			 * the peer our rwnd has opened up. Note we grab the
5509 			 * value from the tcb from last time. Note too that
5510 			 * sack sending clears this when a sack is sent,
5511 			 * which is fine. Once we hit the rwnd_req, we then
5512 			 * will go to the sctp_user_rcvd() that will not
5513 			 * lock until it KNOWs it MUST send a WUP-SACK.
5514 			 */
5515 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5516 			stcb->freed_by_sorcv_sincelast = 0;
5517 		}
5518 	}
5519 	if (stcb &&
5520 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5521 	    control->do_not_ref_stcb == 0) {
5522 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5523 	}
5524 	/* First lets get off the sinfo and sockaddr info */
5525 	if ((sinfo) && filling_sinfo) {
5526 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5527 		nxt = TAILQ_NEXT(control, next);
5528 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5529 			struct sctp_extrcvinfo *s_extra;
5530 
5531 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5532 			if ((nxt) &&
5533 			    (nxt->length)) {
5534 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5535 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5536 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5537 				}
5538 				if (nxt->spec_flags & M_NOTIFICATION) {
5539 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5540 				}
5541 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5542 				s_extra->sreinfo_next_length = nxt->length;
5543 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5544 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5545 				if (nxt->tail_mbuf != NULL) {
5546 					if (nxt->end_added) {
5547 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5548 					}
5549 				}
5550 			} else {
5551 				/*
5552 				 * we explicitly 0 this, since the memcpy
5553 				 * got some other things beyond the older
5554 				 * sinfo_ that is on the control's structure
5555 				 * :-D
5556 				 */
5557 				nxt = NULL;
5558 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5559 				s_extra->sreinfo_next_aid = 0;
5560 				s_extra->sreinfo_next_length = 0;
5561 				s_extra->sreinfo_next_ppid = 0;
5562 				s_extra->sreinfo_next_stream = 0;
5563 			}
5564 		}
5565 		/*
5566 		 * update off the real current cum-ack, if we have an stcb.
5567 		 */
5568 		if ((control->do_not_ref_stcb == 0) && stcb)
5569 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5570 		/*
5571 		 * mask off the high bits, we keep the actual chunk bits in
5572 		 * there.
5573 		 */
5574 		sinfo->sinfo_flags &= 0x00ff;
5575 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5576 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5577 		}
5578 	}
5579 #ifdef SCTP_ASOCLOG_OF_TSNS
5580 	{
5581 		int index, newindex;
5582 		struct sctp_pcbtsn_rlog *entry;
5583 
5584 		do {
5585 			index = inp->readlog_index;
5586 			newindex = index + 1;
5587 			if (newindex >= SCTP_READ_LOG_SIZE) {
5588 				newindex = 0;
5589 			}
5590 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5591 		entry = &inp->readlog[index];
5592 		entry->vtag = control->sinfo_assoc_id;
5593 		entry->strm = control->sinfo_stream;
5594 		entry->seq = control->sinfo_ssn;
5595 		entry->sz = control->length;
5596 		entry->flgs = control->sinfo_flags;
5597 	}
5598 #endif
5599 	if (fromlen && from) {
5600 		struct sockaddr *to;
5601 
5602 #ifdef INET
5603 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5604 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5605 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5606 #else
5607 		/* No AF_INET use AF_INET6 */
5608 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5609 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5610 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5611 #endif
5612 
5613 		to = from;
5614 #if defined(INET) && defined(INET6)
5615 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5616 		    (to->sa_family == AF_INET) &&
5617 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5618 			struct sockaddr_in *sin;
5619 			struct sockaddr_in6 sin6;
5620 
5621 			sin = (struct sockaddr_in *)to;
5622 			bzero(&sin6, sizeof(sin6));
5623 			sin6.sin6_family = AF_INET6;
5624 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5625 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5626 			bcopy(&sin->sin_addr,
5627 			    &sin6.sin6_addr.s6_addr32[3],
5628 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5629 			sin6.sin6_port = sin->sin_port;
5630 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5631 		}
5632 #endif
5633 #if defined(INET6)
5634 		{
5635 			struct sockaddr_in6 lsa6, *to6;
5636 
5637 			to6 = (struct sockaddr_in6 *)to;
5638 			sctp_recover_scope_mac(to6, (&lsa6));
5639 		}
5640 #endif
5641 	}
5642 	/* now copy out what data we can */
5643 	if (mp == NULL) {
5644 		/* copy out each mbuf in the chain up to length */
5645 get_more_data:
5646 		m = control->data;
5647 		while (m) {
5648 			/* Move out all we can */
5649 			cp_len = (int)uio->uio_resid;
5650 			my_len = (int)SCTP_BUF_LEN(m);
5651 			if (cp_len > my_len) {
5652 				/* not enough in this buf */
5653 				cp_len = my_len;
5654 			}
5655 			if (hold_rlock) {
5656 				SCTP_INP_READ_UNLOCK(inp);
5657 				hold_rlock = 0;
5658 			}
5659 			if (cp_len > 0)
5660 				error = uiomove(mtod(m, char *), cp_len, uio);
5661 			/* re-read */
5662 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5663 				goto release;
5664 			}
5665 			if ((control->do_not_ref_stcb == 0) && stcb &&
5666 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5667 				no_rcv_needed = 1;
5668 			}
5669 			if (error) {
5670 				/* error we are out of here */
5671 				goto release;
5672 			}
5673 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5674 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5675 			    ((control->end_added == 0) ||
5676 			    (control->end_added &&
5677 			    (TAILQ_NEXT(control, next) == NULL)))
5678 			    ) {
5679 				SCTP_INP_READ_LOCK(inp);
5680 				hold_rlock = 1;
5681 			}
5682 			if (cp_len == SCTP_BUF_LEN(m)) {
5683 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5684 				    (control->end_added)) {
5685 					out_flags |= MSG_EOR;
5686 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5687 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5688 				}
5689 				if (control->spec_flags & M_NOTIFICATION) {
5690 					out_flags |= MSG_NOTIFICATION;
5691 				}
5692 				/* we ate up the mbuf */
5693 				if (in_flags & MSG_PEEK) {
5694 					/* just looking */
5695 					m = SCTP_BUF_NEXT(m);
5696 					copied_so_far += cp_len;
5697 				} else {
5698 					/* dispose of the mbuf */
5699 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5700 						sctp_sblog(&so->so_rcv,
5701 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5702 					}
5703 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5704 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5705 						sctp_sblog(&so->so_rcv,
5706 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5707 					}
5708 					embuf = m;
5709 					copied_so_far += cp_len;
5710 					freed_so_far += cp_len;
5711 					freed_so_far += MSIZE;
5712 					atomic_subtract_int(&control->length, cp_len);
5713 					control->data = sctp_m_free(m);
5714 					m = control->data;
5715 					/*
5716 					 * been through it all, must hold sb
5717 					 * lock ok to null tail
5718 					 */
5719 					if (control->data == NULL) {
5720 #ifdef INVARIANTS
5721 						if ((control->end_added == 0) ||
5722 						    (TAILQ_NEXT(control, next) == NULL)) {
5723 							/*
5724 							 * If the end is not
5725 							 * added, OR the
5726 							 * next is NOT null
5727 							 * we MUST have the
5728 							 * lock.
5729 							 */
5730 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5731 								panic("Hmm we don't own the lock?");
5732 							}
5733 						}
5734 #endif
5735 						control->tail_mbuf = NULL;
5736 #ifdef INVARIANTS
5737 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5738 							panic("end_added, nothing left and no MSG_EOR");
5739 						}
5740 #endif
5741 					}
5742 				}
5743 			} else {
5744 				/* Do we need to trim the mbuf? */
5745 				if (control->spec_flags & M_NOTIFICATION) {
5746 					out_flags |= MSG_NOTIFICATION;
5747 				}
5748 				if ((in_flags & MSG_PEEK) == 0) {
5749 					SCTP_BUF_RESV_UF(m, cp_len);
5750 					SCTP_BUF_LEN(m) -= cp_len;
5751 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5752 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5753 					}
5754 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5755 					if ((control->do_not_ref_stcb == 0) &&
5756 					    stcb) {
5757 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5758 					}
5759 					copied_so_far += cp_len;
5760 					embuf = m;
5761 					freed_so_far += cp_len;
5762 					freed_so_far += MSIZE;
5763 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5764 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5765 						    SCTP_LOG_SBRESULT, 0);
5766 					}
5767 					atomic_subtract_int(&control->length, cp_len);
5768 				} else {
5769 					copied_so_far += cp_len;
5770 				}
5771 			}
5772 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5773 				break;
5774 			}
5775 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5776 			    (control->do_not_ref_stcb == 0) &&
5777 			    (freed_so_far >= rwnd_req)) {
5778 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5779 			}
5780 		}		/* end while(m) */
5781 		/*
5782 		 * At this point we have looked at it all and we either have
5783 		 * a MSG_EOR/or read all the user wants... <OR>
5784 		 * control->length == 0.
5785 		 */
5786 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5787 			/* we are done with this control */
5788 			if (control->length == 0) {
5789 				if (control->data) {
5790 #ifdef INVARIANTS
5791 					panic("control->data not null at read eor?");
5792 #else
5793 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5794 					sctp_m_freem(control->data);
5795 					control->data = NULL;
5796 #endif
5797 				}
5798 		done_with_control:
5799 				if (TAILQ_NEXT(control, next) == NULL) {
5800 					/*
5801 					 * If we don't have a next we need a
5802 					 * lock, if there is a next
5803 					 * interrupt is filling ahead of us
5804 					 * and we don't need a lock to
5805 					 * remove this guy (which is the
5806 					 * head of the queue).
5807 					 */
5808 					if (hold_rlock == 0) {
5809 						SCTP_INP_READ_LOCK(inp);
5810 						hold_rlock = 1;
5811 					}
5812 				}
5813 				TAILQ_REMOVE(&inp->read_queue, control, next);
5814 				/* Add back any hiddend data */
5815 				if (control->held_length) {
5816 					held_length = 0;
5817 					control->held_length = 0;
5818 					wakeup_read_socket = 1;
5819 				}
5820 				if (control->aux_data) {
5821 					sctp_m_free(control->aux_data);
5822 					control->aux_data = NULL;
5823 				}
5824 				no_rcv_needed = control->do_not_ref_stcb;
5825 				sctp_free_remote_addr(control->whoFrom);
5826 				control->data = NULL;
5827 				sctp_free_a_readq(stcb, control);
5828 				control = NULL;
5829 				if ((freed_so_far >= rwnd_req) &&
5830 				    (no_rcv_needed == 0))
5831 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5832 
5833 			} else {
5834 				/*
5835 				 * The user did not read all of this
5836 				 * message, turn off the returned MSG_EOR
5837 				 * since we are leaving more behind on the
5838 				 * control to read.
5839 				 */
5840 #ifdef INVARIANTS
5841 				if (control->end_added &&
5842 				    (control->data == NULL) &&
5843 				    (control->tail_mbuf == NULL)) {
5844 					panic("Gak, control->length is corrupt?");
5845 				}
5846 #endif
5847 				no_rcv_needed = control->do_not_ref_stcb;
5848 				out_flags &= ~MSG_EOR;
5849 			}
5850 		}
5851 		if (out_flags & MSG_EOR) {
5852 			goto release;
5853 		}
5854 		if ((uio->uio_resid == 0) ||
5855 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5856 		    ) {
5857 			goto release;
5858 		}
5859 		/*
5860 		 * If I hit here the receiver wants more and this message is
5861 		 * NOT done (pd-api). So two questions. Can we block? if not
5862 		 * we are done. Did the user NOT set MSG_WAITALL?
5863 		 */
5864 		if (block_allowed == 0) {
5865 			goto release;
5866 		}
5867 		/*
5868 		 * We need to wait for more data a few things: - We don't
5869 		 * sbunlock() so we don't get someone else reading. - We
5870 		 * must be sure to account for the case where what is added
5871 		 * is NOT to our control when we wakeup.
5872 		 */
5873 
5874 		/*
5875 		 * Do we need to tell the transport a rwnd update might be
5876 		 * needed before we go to sleep?
5877 		 */
5878 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5879 		    ((freed_so_far >= rwnd_req) &&
5880 		    (control->do_not_ref_stcb == 0) &&
5881 		    (no_rcv_needed == 0))) {
5882 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5883 		}
5884 wait_some_more:
5885 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5886 			goto release;
5887 		}
5888 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5889 			goto release;
5890 
5891 		if (hold_rlock == 1) {
5892 			SCTP_INP_READ_UNLOCK(inp);
5893 			hold_rlock = 0;
5894 		}
5895 		if (hold_sblock == 0) {
5896 			SOCKBUF_LOCK(&so->so_rcv);
5897 			hold_sblock = 1;
5898 		}
5899 		if ((copied_so_far) && (control->length == 0) &&
5900 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5901 			goto release;
5902 		}
5903 		if (so->so_rcv.sb_cc <= control->held_length) {
5904 			error = sbwait(&so->so_rcv);
5905 			if (error) {
5906 				goto release;
5907 			}
5908 			control->held_length = 0;
5909 		}
5910 		if (hold_sblock) {
5911 			SOCKBUF_UNLOCK(&so->so_rcv);
5912 			hold_sblock = 0;
5913 		}
5914 		if (control->length == 0) {
5915 			/* still nothing here */
5916 			if (control->end_added == 1) {
5917 				/* he aborted, or is done i.e.did a shutdown */
5918 				out_flags |= MSG_EOR;
5919 				if (control->pdapi_aborted) {
5920 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5921 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5922 
5923 					out_flags |= MSG_TRUNC;
5924 				} else {
5925 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5926 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5927 				}
5928 				goto done_with_control;
5929 			}
5930 			if (so->so_rcv.sb_cc > held_length) {
5931 				control->held_length = so->so_rcv.sb_cc;
5932 				held_length = 0;
5933 			}
5934 			goto wait_some_more;
5935 		} else if (control->data == NULL) {
5936 			/*
5937 			 * we must re-sync since data is probably being
5938 			 * added
5939 			 */
5940 			SCTP_INP_READ_LOCK(inp);
5941 			if ((control->length > 0) && (control->data == NULL)) {
5942 				/*
5943 				 * big trouble.. we have the lock and its
5944 				 * corrupt?
5945 				 */
5946 #ifdef INVARIANTS
5947 				panic("Impossible data==NULL length !=0");
5948 #endif
5949 				out_flags |= MSG_EOR;
5950 				out_flags |= MSG_TRUNC;
5951 				control->length = 0;
5952 				SCTP_INP_READ_UNLOCK(inp);
5953 				goto done_with_control;
5954 			}
5955 			SCTP_INP_READ_UNLOCK(inp);
5956 			/* We will fall around to get more data */
5957 		}
5958 		goto get_more_data;
5959 	} else {
5960 		/*-
5961 		 * Give caller back the mbuf chain,
5962 		 * store in uio_resid the length
5963 		 */
5964 		wakeup_read_socket = 0;
5965 		if ((control->end_added == 0) ||
5966 		    (TAILQ_NEXT(control, next) == NULL)) {
5967 			/* Need to get rlock */
5968 			if (hold_rlock == 0) {
5969 				SCTP_INP_READ_LOCK(inp);
5970 				hold_rlock = 1;
5971 			}
5972 		}
5973 		if (control->end_added) {
5974 			out_flags |= MSG_EOR;
5975 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5976 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5977 		}
5978 		if (control->spec_flags & M_NOTIFICATION) {
5979 			out_flags |= MSG_NOTIFICATION;
5980 		}
5981 		uio->uio_resid = control->length;
5982 		*mp = control->data;
5983 		m = control->data;
5984 		while (m) {
5985 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5986 				sctp_sblog(&so->so_rcv,
5987 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5988 			}
5989 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5990 			freed_so_far += SCTP_BUF_LEN(m);
5991 			freed_so_far += MSIZE;
5992 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5993 				sctp_sblog(&so->so_rcv,
5994 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5995 			}
5996 			m = SCTP_BUF_NEXT(m);
5997 		}
5998 		control->data = control->tail_mbuf = NULL;
5999 		control->length = 0;
6000 		if (out_flags & MSG_EOR) {
6001 			/* Done with this control */
6002 			goto done_with_control;
6003 		}
6004 	}
6005 release:
6006 	if (hold_rlock == 1) {
6007 		SCTP_INP_READ_UNLOCK(inp);
6008 		hold_rlock = 0;
6009 	}
6010 	if (hold_sblock == 1) {
6011 		SOCKBUF_UNLOCK(&so->so_rcv);
6012 		hold_sblock = 0;
6013 	}
6014 	sbunlock(&so->so_rcv);
6015 	sockbuf_lock = 0;
6016 
6017 release_unlocked:
6018 	if (hold_sblock) {
6019 		SOCKBUF_UNLOCK(&so->so_rcv);
6020 		hold_sblock = 0;
6021 	}
6022 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6023 		if ((freed_so_far >= rwnd_req) &&
6024 		    (control && (control->do_not_ref_stcb == 0)) &&
6025 		    (no_rcv_needed == 0))
6026 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6027 	}
6028 out:
6029 	if (msg_flags) {
6030 		*msg_flags = out_flags;
6031 	}
6032 	if (((out_flags & MSG_EOR) == 0) &&
6033 	    ((in_flags & MSG_PEEK) == 0) &&
6034 	    (sinfo) &&
6035 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
6036 		struct sctp_extrcvinfo *s_extra;
6037 
6038 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6039 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6040 	}
6041 	if (hold_rlock == 1) {
6042 		SCTP_INP_READ_UNLOCK(inp);
6043 		hold_rlock = 0;
6044 	}
6045 	if (hold_sblock) {
6046 		SOCKBUF_UNLOCK(&so->so_rcv);
6047 		hold_sblock = 0;
6048 	}
6049 	if (sockbuf_lock) {
6050 		sbunlock(&so->so_rcv);
6051 	}
6052 	if (freecnt_applied) {
6053 		/*
6054 		 * The lock on the socket buffer protects us so the free
6055 		 * code will stop. But since we used the socketbuf lock and
6056 		 * the sender uses the tcb_lock to increment, we need to use
6057 		 * the atomic add to the refcnt.
6058 		 */
6059 		if (stcb == NULL) {
6060 #ifdef INVARIANTS
6061 			panic("stcb for refcnt has gone NULL?");
6062 			goto stage_left;
6063 #else
6064 			goto stage_left;
6065 #endif
6066 		}
6067 		atomic_add_int(&stcb->asoc.refcnt, -1);
6068 		freecnt_applied = 0;
6069 		/* Save the value back for next time */
6070 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6071 	}
6072 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6073 		if (stcb) {
6074 			sctp_misc_ints(SCTP_SORECV_DONE,
6075 			    freed_so_far,
6076 			    ((uio) ? (slen - uio->uio_resid) : slen),
6077 			    stcb->asoc.my_rwnd,
6078 			    so->so_rcv.sb_cc);
6079 		} else {
6080 			sctp_misc_ints(SCTP_SORECV_DONE,
6081 			    freed_so_far,
6082 			    ((uio) ? (slen - uio->uio_resid) : slen),
6083 			    0,
6084 			    so->so_rcv.sb_cc);
6085 		}
6086 	}
6087 stage_left:
6088 	if (wakeup_read_socket) {
6089 		sctp_sorwakeup(inp, so);
6090 	}
6091 	return (error);
6092 }
6093 
6094 
6095 #ifdef SCTP_MBUF_LOGGING
6096 struct mbuf *
6097 sctp_m_free(struct mbuf *m)
6098 {
6099 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6100 		if (SCTP_BUF_IS_EXTENDED(m)) {
6101 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6102 		}
6103 	}
6104 	return (m_free(m));
6105 }
6106 
6107 void
6108 sctp_m_freem(struct mbuf *mb)
6109 {
6110 	while (mb != NULL)
6111 		mb = sctp_m_free(mb);
6112 }
6113 
6114 #endif
6115 
6116 int
6117 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6118 {
6119 	/*
6120 	 * Given a local address. For all associations that holds the
6121 	 * address, request a peer-set-primary.
6122 	 */
6123 	struct sctp_ifa *ifa;
6124 	struct sctp_laddr *wi;
6125 
6126 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6127 	if (ifa == NULL) {
6128 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6129 		return (EADDRNOTAVAIL);
6130 	}
6131 	/*
6132 	 * Now that we have the ifa we must awaken the iterator with this
6133 	 * message.
6134 	 */
6135 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6136 	if (wi == NULL) {
6137 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6138 		return (ENOMEM);
6139 	}
6140 	/* Now incr the count and int wi structure */
6141 	SCTP_INCR_LADDR_COUNT();
6142 	bzero(wi, sizeof(*wi));
6143 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6144 	wi->ifa = ifa;
6145 	wi->action = SCTP_SET_PRIM_ADDR;
6146 	atomic_add_int(&ifa->refcount, 1);
6147 
6148 	/* Now add it to the work queue */
6149 	SCTP_WQ_ADDR_LOCK();
6150 	/*
6151 	 * Should this really be a tailq? As it is we will process the
6152 	 * newest first :-0
6153 	 */
6154 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6155 	SCTP_WQ_ADDR_UNLOCK();
6156 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6157 	    (struct sctp_inpcb *)NULL,
6158 	    (struct sctp_tcb *)NULL,
6159 	    (struct sctp_nets *)NULL);
6160 	return (0);
6161 }
6162 
6163 
6164 int
6165 sctp_soreceive(struct socket *so,
6166     struct sockaddr **psa,
6167     struct uio *uio,
6168     struct mbuf **mp0,
6169     struct mbuf **controlp,
6170     int *flagsp)
6171 {
6172 	int error, fromlen;
6173 	uint8_t sockbuf[256];
6174 	struct sockaddr *from;
6175 	struct sctp_extrcvinfo sinfo;
6176 	int filling_sinfo = 1;
6177 	struct sctp_inpcb *inp;
6178 
6179 	inp = (struct sctp_inpcb *)so->so_pcb;
6180 	/* pickup the assoc we are reading from */
6181 	if (inp == NULL) {
6182 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6183 		return (EINVAL);
6184 	}
6185 	if ((sctp_is_feature_off(inp,
6186 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6187 	    (controlp == NULL)) {
6188 		/* user does not want the sndrcv ctl */
6189 		filling_sinfo = 0;
6190 	}
6191 	if (psa) {
6192 		from = (struct sockaddr *)sockbuf;
6193 		fromlen = sizeof(sockbuf);
6194 		from->sa_len = 0;
6195 	} else {
6196 		from = NULL;
6197 		fromlen = 0;
6198 	}
6199 
6200 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6201 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6202 	if ((controlp) && (filling_sinfo)) {
6203 		/* copy back the sinfo in a CMSG format */
6204 		if (filling_sinfo)
6205 			*controlp = sctp_build_ctl_nchunk(inp,
6206 			    (struct sctp_sndrcvinfo *)&sinfo);
6207 		else
6208 			*controlp = NULL;
6209 	}
6210 	if (psa) {
6211 		/* copy back the address info */
6212 		if (from && from->sa_len) {
6213 			*psa = sodupsockaddr(from, M_NOWAIT);
6214 		} else {
6215 			*psa = NULL;
6216 		}
6217 	}
6218 	return (error);
6219 }
6220 
6221 
6222 int
6223 sctp_l_soreceive(struct socket *so,
6224     struct sockaddr **name,
6225     struct uio *uio,
6226     char **controlp,
6227     int *controllen,
6228     int *flag)
6229 {
6230 	int error, fromlen;
6231 	uint8_t sockbuf[256];
6232 	struct sockaddr *from;
6233 	struct sctp_extrcvinfo sinfo;
6234 	int filling_sinfo = 1;
6235 	struct sctp_inpcb *inp;
6236 
6237 	inp = (struct sctp_inpcb *)so->so_pcb;
6238 	/* pickup the assoc we are reading from */
6239 	if (inp == NULL) {
6240 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6241 		return (EINVAL);
6242 	}
6243 	if ((sctp_is_feature_off(inp,
6244 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6245 	    (controlp == NULL)) {
6246 		/* user does not want the sndrcv ctl */
6247 		filling_sinfo = 0;
6248 	}
6249 	if (name) {
6250 		from = (struct sockaddr *)sockbuf;
6251 		fromlen = sizeof(sockbuf);
6252 		from->sa_len = 0;
6253 	} else {
6254 		from = NULL;
6255 		fromlen = 0;
6256 	}
6257 
6258 	error = sctp_sorecvmsg(so, uio,
6259 	    (struct mbuf **)NULL,
6260 	    from, fromlen, flag,
6261 	    (struct sctp_sndrcvinfo *)&sinfo,
6262 	    filling_sinfo);
6263 	if ((controlp) && (filling_sinfo)) {
6264 		/*
6265 		 * copy back the sinfo in a CMSG format note that the caller
6266 		 * has reponsibility for freeing the memory.
6267 		 */
6268 		if (filling_sinfo)
6269 			*controlp = sctp_build_ctl_cchunk(inp,
6270 			    controllen,
6271 			    (struct sctp_sndrcvinfo *)&sinfo);
6272 	}
6273 	if (name) {
6274 		/* copy back the address info */
6275 		if (from && from->sa_len) {
6276 			*name = sodupsockaddr(from, M_WAIT);
6277 		} else {
6278 			*name = NULL;
6279 		}
6280 	}
6281 	return (error);
6282 }
6283 
6284 
6285 
6286 
6287 
6288 
6289 
6290 int
6291 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6292     int totaddr, int *error)
6293 {
6294 	int added = 0;
6295 	int i;
6296 	struct sctp_inpcb *inp;
6297 	struct sockaddr *sa;
6298 	size_t incr = 0;
6299 
6300 	sa = addr;
6301 	inp = stcb->sctp_ep;
6302 	*error = 0;
6303 	for (i = 0; i < totaddr; i++) {
6304 		if (sa->sa_family == AF_INET) {
6305 			incr = sizeof(struct sockaddr_in);
6306 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6307 				/* assoc gone no un-lock */
6308 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6309 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
6310 				*error = ENOBUFS;
6311 				goto out_now;
6312 			}
6313 			added++;
6314 		} else if (sa->sa_family == AF_INET6) {
6315 			incr = sizeof(struct sockaddr_in6);
6316 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6317 				/* assoc gone no un-lock */
6318 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6319 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
6320 				*error = ENOBUFS;
6321 				goto out_now;
6322 			}
6323 			added++;
6324 		}
6325 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6326 	}
6327 out_now:
6328 	return (added);
6329 }
6330 
6331 struct sctp_tcb *
6332 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6333     int *totaddr, int *num_v4, int *num_v6, int *error,
6334     int limit, int *bad_addr)
6335 {
6336 	struct sockaddr *sa;
6337 	struct sctp_tcb *stcb = NULL;
6338 	size_t incr, at, i;
6339 
6340 	at = incr = 0;
6341 	sa = addr;
6342 	*error = *num_v6 = *num_v4 = 0;
6343 	/* account and validate addresses */
6344 	for (i = 0; i < (size_t)*totaddr; i++) {
6345 		if (sa->sa_family == AF_INET) {
6346 			(*num_v4) += 1;
6347 			incr = sizeof(struct sockaddr_in);
6348 			if (sa->sa_len != incr) {
6349 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6350 				*error = EINVAL;
6351 				*bad_addr = 1;
6352 				return (NULL);
6353 			}
6354 		} else if (sa->sa_family == AF_INET6) {
6355 			struct sockaddr_in6 *sin6;
6356 
6357 			sin6 = (struct sockaddr_in6 *)sa;
6358 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6359 				/* Must be non-mapped for connectx */
6360 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6361 				*error = EINVAL;
6362 				*bad_addr = 1;
6363 				return (NULL);
6364 			}
6365 			(*num_v6) += 1;
6366 			incr = sizeof(struct sockaddr_in6);
6367 			if (sa->sa_len != incr) {
6368 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6369 				*error = EINVAL;
6370 				*bad_addr = 1;
6371 				return (NULL);
6372 			}
6373 		} else {
6374 			*totaddr = i;
6375 			/* we are done */
6376 			break;
6377 		}
6378 		SCTP_INP_INCR_REF(inp);
6379 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6380 		if (stcb != NULL) {
6381 			/* Already have or am bring up an association */
6382 			return (stcb);
6383 		} else {
6384 			SCTP_INP_DECR_REF(inp);
6385 		}
6386 		if ((at + incr) > (size_t)limit) {
6387 			*totaddr = i;
6388 			break;
6389 		}
6390 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6391 	}
6392 	return ((struct sctp_tcb *)NULL);
6393 }
6394 
6395 /*
6396  * sctp_bindx(ADD) for one address.
6397  * assumes all arguments are valid/checked by caller.
6398  */
6399 void
6400 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6401     struct sockaddr *sa, sctp_assoc_t assoc_id,
6402     uint32_t vrf_id, int *error, void *p)
6403 {
6404 	struct sockaddr *addr_touse;
6405 
6406 #ifdef INET6
6407 	struct sockaddr_in sin;
6408 
6409 #endif
6410 
6411 	/* see if we're bound all already! */
6412 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6413 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6414 		*error = EINVAL;
6415 		return;
6416 	}
6417 	addr_touse = sa;
6418 #if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
6419 	if (sa->sa_family == AF_INET6) {
6420 		struct sockaddr_in6 *sin6;
6421 
6422 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6423 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6424 			*error = EINVAL;
6425 			return;
6426 		}
6427 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6428 			/* can only bind v6 on PF_INET6 sockets */
6429 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6430 			*error = EINVAL;
6431 			return;
6432 		}
6433 		sin6 = (struct sockaddr_in6 *)addr_touse;
6434 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6435 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6436 			    SCTP_IPV6_V6ONLY(inp)) {
6437 				/* can't bind v4-mapped on PF_INET sockets */
6438 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6439 				*error = EINVAL;
6440 				return;
6441 			}
6442 			in6_sin6_2_sin(&sin, sin6);
6443 			addr_touse = (struct sockaddr *)&sin;
6444 		}
6445 	}
6446 #endif
6447 	if (sa->sa_family == AF_INET) {
6448 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6449 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6450 			*error = EINVAL;
6451 			return;
6452 		}
6453 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6454 		    SCTP_IPV6_V6ONLY(inp)) {
6455 			/* can't bind v4 on PF_INET sockets */
6456 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6457 			*error = EINVAL;
6458 			return;
6459 		}
6460 	}
6461 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6462 		if (p == NULL) {
6463 			/* Can't get proc for Net/Open BSD */
6464 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6465 			*error = EINVAL;
6466 			return;
6467 		}
6468 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6469 		return;
6470 	}
6471 	/*
6472 	 * No locks required here since bind and mgmt_ep_sa all do their own
6473 	 * locking. If we do something for the FIX: below we may need to
6474 	 * lock in that case.
6475 	 */
6476 	if (assoc_id == 0) {
6477 		/* add the address */
6478 		struct sctp_inpcb *lep;
6479 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6480 
6481 		/* validate the incoming port */
6482 		if ((lsin->sin_port != 0) &&
6483 		    (lsin->sin_port != inp->sctp_lport)) {
6484 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6485 			*error = EINVAL;
6486 			return;
6487 		} else {
6488 			/* user specified 0 port, set it to existing port */
6489 			lsin->sin_port = inp->sctp_lport;
6490 		}
6491 
6492 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6493 		if (lep != NULL) {
6494 			/*
6495 			 * We must decrement the refcount since we have the
6496 			 * ep already and are binding. No remove going on
6497 			 * here.
6498 			 */
6499 			SCTP_INP_DECR_REF(lep);
6500 		}
6501 		if (lep == inp) {
6502 			/* already bound to it.. ok */
6503 			return;
6504 		} else if (lep == NULL) {
6505 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6506 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6507 			    SCTP_ADD_IP_ADDRESS,
6508 			    vrf_id, NULL);
6509 		} else {
6510 			*error = EADDRINUSE;
6511 		}
6512 		if (*error)
6513 			return;
6514 	} else {
6515 		/*
6516 		 * FIX: decide whether we allow assoc based bindx
6517 		 */
6518 	}
6519 }
6520 
6521 /*
6522  * sctp_bindx(DELETE) for one address.
6523  * assumes all arguments are valid/checked by caller.
6524  */
6525 void
6526 sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
6527     struct sockaddr *sa, sctp_assoc_t assoc_id,
6528     uint32_t vrf_id, int *error)
6529 {
6530 	struct sockaddr *addr_touse;
6531 
6532 #ifdef INET6
6533 	struct sockaddr_in sin;
6534 
6535 #endif
6536 
6537 	/* see if we're bound all already! */
6538 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6539 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6540 		*error = EINVAL;
6541 		return;
6542 	}
6543 	addr_touse = sa;
6544 #if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
6545 	if (sa->sa_family == AF_INET6) {
6546 		struct sockaddr_in6 *sin6;
6547 
6548 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6549 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6550 			*error = EINVAL;
6551 			return;
6552 		}
6553 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6554 			/* can only bind v6 on PF_INET6 sockets */
6555 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6556 			*error = EINVAL;
6557 			return;
6558 		}
6559 		sin6 = (struct sockaddr_in6 *)addr_touse;
6560 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6561 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6562 			    SCTP_IPV6_V6ONLY(inp)) {
6563 				/* can't bind mapped-v4 on PF_INET sockets */
6564 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6565 				*error = EINVAL;
6566 				return;
6567 			}
6568 			in6_sin6_2_sin(&sin, sin6);
6569 			addr_touse = (struct sockaddr *)&sin;
6570 		}
6571 	}
6572 #endif
6573 	if (sa->sa_family == AF_INET) {
6574 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6575 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6576 			*error = EINVAL;
6577 			return;
6578 		}
6579 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6580 		    SCTP_IPV6_V6ONLY(inp)) {
6581 			/* can't bind v4 on PF_INET sockets */
6582 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6583 			*error = EINVAL;
6584 			return;
6585 		}
6586 	}
6587 	/*
6588 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6589 	 * below is ever changed we may need to lock before calling
6590 	 * association level binding.
6591 	 */
6592 	if (assoc_id == 0) {
6593 		/* delete the address */
6594 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6595 		    SCTP_DEL_IP_ADDRESS,
6596 		    vrf_id, NULL);
6597 	} else {
6598 		/*
6599 		 * FIX: decide whether we allow assoc based bindx
6600 		 */
6601 	}
6602 }
6603 
6604 /*
6605  * returns the valid local address count for an assoc, taking into account
6606  * all scoping rules
6607  */
6608 int
6609 sctp_local_addr_count(struct sctp_tcb *stcb)
6610 {
6611 	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
6612 	int ipv4_addr_legal, ipv6_addr_legal;
6613 	struct sctp_vrf *vrf;
6614 	struct sctp_ifn *sctp_ifn;
6615 	struct sctp_ifa *sctp_ifa;
6616 	int count = 0;
6617 
6618 	/* Turn on all the appropriate scopes */
6619 	loopback_scope = stcb->asoc.loopback_scope;
6620 	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
6621 	local_scope = stcb->asoc.local_scope;
6622 	site_scope = stcb->asoc.site_scope;
6623 	ipv4_addr_legal = ipv6_addr_legal = 0;
6624 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6625 		ipv6_addr_legal = 1;
6626 		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
6627 			ipv4_addr_legal = 1;
6628 		}
6629 	} else {
6630 		ipv4_addr_legal = 1;
6631 	}
6632 
6633 	SCTP_IPI_ADDR_RLOCK();
6634 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6635 	if (vrf == NULL) {
6636 		/* no vrf, no addresses */
6637 		SCTP_IPI_ADDR_RUNLOCK();
6638 		return (0);
6639 	}
6640 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6641 		/*
6642 		 * bound all case: go through all ifns on the vrf
6643 		 */
6644 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6645 			if ((loopback_scope == 0) &&
6646 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6647 				continue;
6648 			}
6649 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6650 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6651 					continue;
6652 				switch (sctp_ifa->address.sa.sa_family) {
6653 				case AF_INET:
6654 					if (ipv4_addr_legal) {
6655 						struct sockaddr_in *sin;
6656 
6657 						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
6658 						if (sin->sin_addr.s_addr == 0) {
6659 							/*
6660 							 * skip unspecified
6661 							 * addrs
6662 							 */
6663 							continue;
6664 						}
6665 						if ((ipv4_local_scope == 0) &&
6666 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6667 							continue;
6668 						}
6669 						/* count this one */
6670 						count++;
6671 					} else {
6672 						continue;
6673 					}
6674 					break;
6675 #ifdef INET6
6676 				case AF_INET6:
6677 					if (ipv6_addr_legal) {
6678 						struct sockaddr_in6 *sin6;
6679 
6680 						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
6681 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6682 							continue;
6683 						}
6684 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6685 							if (local_scope == 0)
6686 								continue;
6687 							if (sin6->sin6_scope_id == 0) {
6688 								if (sa6_recoverscope(sin6) != 0)
6689 									/*
6690 									 *
6691 									 * bad
6692 									 *
6693 									 * li
6694 									 * nk
6695 									 *
6696 									 * loc
6697 									 * al
6698 									 *
6699 									 * add
6700 									 * re
6701 									 * ss
6702 									 * */
6703 									continue;
6704 							}
6705 						}
6706 						if ((site_scope == 0) &&
6707 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6708 							continue;
6709 						}
6710 						/* count this one */
6711 						count++;
6712 					}
6713 					break;
6714 #endif
6715 				default:
6716 					/* TSNH */
6717 					break;
6718 				}
6719 			}
6720 		}
6721 	} else {
6722 		/*
6723 		 * subset bound case
6724 		 */
6725 		struct sctp_laddr *laddr;
6726 
6727 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6728 		    sctp_nxt_addr) {
6729 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6730 				continue;
6731 			}
6732 			/* count this one */
6733 			count++;
6734 		}
6735 	}
6736 	SCTP_IPI_ADDR_RUNLOCK();
6737 	return (count);
6738 }
6739 
6740 #if defined(SCTP_LOCAL_TRACE_BUF)
6741 
6742 void
6743 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6744 {
6745 	uint32_t saveindex, newindex;
6746 
6747 	do {
6748 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6749 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6750 			newindex = 1;
6751 		} else {
6752 			newindex = saveindex + 1;
6753 		}
6754 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6755 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6756 		saveindex = 0;
6757 	}
6758 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6759 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6760 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6761 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6762 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6763 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6764 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6765 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6766 }
6767 
6768 #endif
6769 /* We will need to add support
6770  * to bind the ports and such here
6771  * so we can do UDP tunneling. In
6772  * the mean-time, we return error
6773  */
6774 #include <netinet/udp.h>
6775 #include <netinet/udp_var.h>
6776 #include <sys/proc.h>
6777 #ifdef INET6
6778 #include <netinet6/sctp6_var.h>
6779 #endif
6780 
6781 static void
6782 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6783 {
6784 	struct ip *iph;
6785 	struct mbuf *sp, *last;
6786 	struct udphdr *uhdr;
6787 	uint16_t port = 0, len;
6788 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6789 
6790 	/*
6791 	 * Split out the mbuf chain. Leave the IP header in m, place the
6792 	 * rest in the sp.
6793 	 */
6794 	if ((m->m_flags & M_PKTHDR) == 0) {
6795 		/* Can't handle one that is not a pkt hdr */
6796 		goto out;
6797 	}
6798 	/* pull the src port */
6799 	iph = mtod(m, struct ip *);
6800 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6801 
6802 	port = uhdr->uh_sport;
6803 	sp = m_split(m, off, M_DONTWAIT);
6804 	if (sp == NULL) {
6805 		/* Gak, drop packet, we can't do a split */
6806 		goto out;
6807 	}
6808 	if (sp->m_pkthdr.len < header_size) {
6809 		/* Gak, packet can't have an SCTP header in it - to small */
6810 		m_freem(sp);
6811 		goto out;
6812 	}
6813 	/* ok now pull up the UDP header and SCTP header together */
6814 	sp = m_pullup(sp, header_size);
6815 	if (sp == NULL) {
6816 		/* Gak pullup failed */
6817 		goto out;
6818 	}
6819 	/* trim out the UDP header */
6820 	m_adj(sp, sizeof(struct udphdr));
6821 
6822 	/* Now reconstruct the mbuf chain */
6823 	/* 1) find last one */
6824 	last = m;
6825 	while (last->m_next != NULL) {
6826 		last = last->m_next;
6827 	}
6828 	last->m_next = sp;
6829 	m->m_pkthdr.len += sp->m_pkthdr.len;
6830 	last = m;
6831 	while (last != NULL) {
6832 		last = last->m_next;
6833 	}
6834 	/* Now its ready for sctp_input or sctp6_input */
6835 	iph = mtod(m, struct ip *);
6836 	switch (iph->ip_v) {
6837 	case IPVERSION:
6838 		{
6839 			/* its IPv4 */
6840 			len = SCTP_GET_IPV4_LENGTH(iph);
6841 			len -= sizeof(struct udphdr);
6842 			SCTP_GET_IPV4_LENGTH(iph) = len;
6843 			sctp_input_with_port(m, off, port);
6844 			break;
6845 		}
6846 #ifdef INET6
6847 	case IPV6_VERSION >> 4:
6848 		{
6849 			/* its IPv6 - NOT supported */
6850 			goto out;
6851 			break;
6852 
6853 		}
6854 #endif
6855 	default:
6856 		{
6857 			m_freem(m);
6858 			break;
6859 		}
6860 	}
6861 	return;
6862 out:
6863 	m_freem(m);
6864 }
6865 
6866 void
6867 sctp_over_udp_stop(void)
6868 {
6869 	struct socket *sop;
6870 
6871 	/*
6872 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6873 	 * for writting!
6874 	 */
6875 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6876 		/* Nothing to do */
6877 		return;
6878 	}
6879 	sop = SCTP_BASE_INFO(udp_tun_socket);
6880 	soclose(sop);
6881 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6882 }
6883 int
6884 sctp_over_udp_start(void)
6885 {
6886 	uint16_t port;
6887 	int ret;
6888 	struct sockaddr_in sin;
6889 	struct socket *sop = NULL;
6890 	struct thread *th;
6891 	struct ucred *cred;
6892 
6893 	/*
6894 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6895 	 * for writting!
6896 	 */
6897 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6898 	if (port == 0) {
6899 		/* Must have a port set */
6900 		return (EINVAL);
6901 	}
6902 	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
6903 		/* Already running -- must stop first */
6904 		return (EALREADY);
6905 	}
6906 	th = curthread;
6907 	cred = th->td_ucred;
6908 	if ((ret = socreate(PF_INET, &sop,
6909 	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
6910 		return (ret);
6911 	}
6912 	SCTP_BASE_INFO(udp_tun_socket) = sop;
6913 	/* call the special UDP hook */
6914 	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
6915 	if (ret) {
6916 		goto exit_stage_left;
6917 	}
6918 	/* Ok we have a socket, bind it to the port */
6919 	memset(&sin, 0, sizeof(sin));
6920 	sin.sin_len = sizeof(sin);
6921 	sin.sin_family = AF_INET;
6922 	sin.sin_port = htons(port);
6923 	ret = sobind(sop, (struct sockaddr *)&sin, th);
6924 	if (ret) {
6925 		/* Close up we cant get the port */
6926 exit_stage_left:
6927 		sctp_over_udp_stop();
6928 		return (ret);
6929 	}
6930 	/*
6931 	 * Ok we should now get UDP packets directly to our input routine
6932 	 * sctp_recv_upd_tunneled_packet().
6933 	 */
6934 	return (0);
6935 }
6936