xref: /freebsd/sys/netinet/sctputil.c (revision eb6d21b4ca6d668cf89afd99eef7baeafa712197)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_cc_functions.h>
51 
52 #define NUMBER_OF_MTU_SIZES 18
53 
54 
55 #if defined(__Windows__) && !defined(SCTP_LOCAL_TRACE_BUF)
56 #include "eventrace_netinet.h"
57 #include "sctputil.tmh"		/* this is the file that will be auto
58 				 * generated */
59 #else
60 #ifndef KTR_SCTP
61 #define KTR_SCTP KTR_SUBSYS
62 #endif
63 #endif
64 
65 void
66 sctp_sblog(struct sockbuf *sb,
67     struct sctp_tcb *stcb, int from, int incr)
68 {
69 	struct sctp_cwnd_log sctp_clog;
70 
71 	sctp_clog.x.sb.stcb = stcb;
72 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
73 	if (stcb)
74 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
75 	else
76 		sctp_clog.x.sb.stcb_sbcc = 0;
77 	sctp_clog.x.sb.incr = incr;
78 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
79 	    SCTP_LOG_EVENT_SB,
80 	    from,
81 	    sctp_clog.x.misc.log1,
82 	    sctp_clog.x.misc.log2,
83 	    sctp_clog.x.misc.log3,
84 	    sctp_clog.x.misc.log4);
85 }
86 
87 void
88 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
89 {
90 	struct sctp_cwnd_log sctp_clog;
91 
92 	sctp_clog.x.close.inp = (void *)inp;
93 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
94 	if (stcb) {
95 		sctp_clog.x.close.stcb = (void *)stcb;
96 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
97 	} else {
98 		sctp_clog.x.close.stcb = 0;
99 		sctp_clog.x.close.state = 0;
100 	}
101 	sctp_clog.x.close.loc = loc;
102 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
103 	    SCTP_LOG_EVENT_CLOSE,
104 	    0,
105 	    sctp_clog.x.misc.log1,
106 	    sctp_clog.x.misc.log2,
107 	    sctp_clog.x.misc.log3,
108 	    sctp_clog.x.misc.log4);
109 }
110 
111 
112 void
113 rto_logging(struct sctp_nets *net, int from)
114 {
115 	struct sctp_cwnd_log sctp_clog;
116 
117 	memset(&sctp_clog, 0, sizeof(sctp_clog));
118 	sctp_clog.x.rto.net = (void *)net;
119 	sctp_clog.x.rto.rtt = net->prev_rtt;
120 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
121 	    SCTP_LOG_EVENT_RTT,
122 	    from,
123 	    sctp_clog.x.misc.log1,
124 	    sctp_clog.x.misc.log2,
125 	    sctp_clog.x.misc.log3,
126 	    sctp_clog.x.misc.log4);
127 
128 }
129 
130 void
131 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
132 {
133 	struct sctp_cwnd_log sctp_clog;
134 
135 	sctp_clog.x.strlog.stcb = stcb;
136 	sctp_clog.x.strlog.n_tsn = tsn;
137 	sctp_clog.x.strlog.n_sseq = sseq;
138 	sctp_clog.x.strlog.e_tsn = 0;
139 	sctp_clog.x.strlog.e_sseq = 0;
140 	sctp_clog.x.strlog.strm = stream;
141 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
142 	    SCTP_LOG_EVENT_STRM,
143 	    from,
144 	    sctp_clog.x.misc.log1,
145 	    sctp_clog.x.misc.log2,
146 	    sctp_clog.x.misc.log3,
147 	    sctp_clog.x.misc.log4);
148 
149 }
150 
151 void
152 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
153 {
154 	struct sctp_cwnd_log sctp_clog;
155 
156 	sctp_clog.x.nagle.stcb = (void *)stcb;
157 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
158 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
159 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
160 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
161 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
162 	    SCTP_LOG_EVENT_NAGLE,
163 	    action,
164 	    sctp_clog.x.misc.log1,
165 	    sctp_clog.x.misc.log2,
166 	    sctp_clog.x.misc.log3,
167 	    sctp_clog.x.misc.log4);
168 }
169 
170 
171 void
172 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
173 {
174 	struct sctp_cwnd_log sctp_clog;
175 
176 	sctp_clog.x.sack.cumack = cumack;
177 	sctp_clog.x.sack.oldcumack = old_cumack;
178 	sctp_clog.x.sack.tsn = tsn;
179 	sctp_clog.x.sack.numGaps = gaps;
180 	sctp_clog.x.sack.numDups = dups;
181 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
182 	    SCTP_LOG_EVENT_SACK,
183 	    from,
184 	    sctp_clog.x.misc.log1,
185 	    sctp_clog.x.misc.log2,
186 	    sctp_clog.x.misc.log3,
187 	    sctp_clog.x.misc.log4);
188 }
189 
190 void
191 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
192 {
193 	struct sctp_cwnd_log sctp_clog;
194 
195 	memset(&sctp_clog, 0, sizeof(sctp_clog));
196 	sctp_clog.x.map.base = map;
197 	sctp_clog.x.map.cum = cum;
198 	sctp_clog.x.map.high = high;
199 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
200 	    SCTP_LOG_EVENT_MAP,
201 	    from,
202 	    sctp_clog.x.misc.log1,
203 	    sctp_clog.x.misc.log2,
204 	    sctp_clog.x.misc.log3,
205 	    sctp_clog.x.misc.log4);
206 }
207 
208 void
209 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
210     int from)
211 {
212 	struct sctp_cwnd_log sctp_clog;
213 
214 	memset(&sctp_clog, 0, sizeof(sctp_clog));
215 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
216 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
217 	sctp_clog.x.fr.tsn = tsn;
218 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
219 	    SCTP_LOG_EVENT_FR,
220 	    from,
221 	    sctp_clog.x.misc.log1,
222 	    sctp_clog.x.misc.log2,
223 	    sctp_clog.x.misc.log3,
224 	    sctp_clog.x.misc.log4);
225 
226 }
227 
228 
229 void
230 sctp_log_mb(struct mbuf *m, int from)
231 {
232 	struct sctp_cwnd_log sctp_clog;
233 
234 	sctp_clog.x.mb.mp = m;
235 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
236 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
237 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
238 	if (SCTP_BUF_IS_EXTENDED(m)) {
239 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
240 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
241 	} else {
242 		sctp_clog.x.mb.ext = 0;
243 		sctp_clog.x.mb.refcnt = 0;
244 	}
245 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
246 	    SCTP_LOG_EVENT_MBUF,
247 	    from,
248 	    sctp_clog.x.misc.log1,
249 	    sctp_clog.x.misc.log2,
250 	    sctp_clog.x.misc.log3,
251 	    sctp_clog.x.misc.log4);
252 }
253 
254 
255 void
256 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
257     int from)
258 {
259 	struct sctp_cwnd_log sctp_clog;
260 
261 	if (control == NULL) {
262 		SCTP_PRINTF("Gak log of NULL?\n");
263 		return;
264 	}
265 	sctp_clog.x.strlog.stcb = control->stcb;
266 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
267 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
268 	sctp_clog.x.strlog.strm = control->sinfo_stream;
269 	if (poschk != NULL) {
270 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
271 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
272 	} else {
273 		sctp_clog.x.strlog.e_tsn = 0;
274 		sctp_clog.x.strlog.e_sseq = 0;
275 	}
276 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
277 	    SCTP_LOG_EVENT_STRM,
278 	    from,
279 	    sctp_clog.x.misc.log1,
280 	    sctp_clog.x.misc.log2,
281 	    sctp_clog.x.misc.log3,
282 	    sctp_clog.x.misc.log4);
283 
284 }
285 
286 void
287 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
288 {
289 	struct sctp_cwnd_log sctp_clog;
290 
291 	sctp_clog.x.cwnd.net = net;
292 	if (stcb->asoc.send_queue_cnt > 255)
293 		sctp_clog.x.cwnd.cnt_in_send = 255;
294 	else
295 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
296 	if (stcb->asoc.stream_queue_cnt > 255)
297 		sctp_clog.x.cwnd.cnt_in_str = 255;
298 	else
299 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
300 
301 	if (net) {
302 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
303 		sctp_clog.x.cwnd.inflight = net->flight_size;
304 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
305 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
306 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
307 	}
308 	if (SCTP_CWNDLOG_PRESEND == from) {
309 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
310 	}
311 	sctp_clog.x.cwnd.cwnd_augment = augment;
312 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
313 	    SCTP_LOG_EVENT_CWND,
314 	    from,
315 	    sctp_clog.x.misc.log1,
316 	    sctp_clog.x.misc.log2,
317 	    sctp_clog.x.misc.log3,
318 	    sctp_clog.x.misc.log4);
319 
320 }
321 
322 void
323 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
324 {
325 	struct sctp_cwnd_log sctp_clog;
326 
327 	memset(&sctp_clog, 0, sizeof(sctp_clog));
328 	if (inp) {
329 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
330 
331 	} else {
332 		sctp_clog.x.lock.sock = (void *)NULL;
333 	}
334 	sctp_clog.x.lock.inp = (void *)inp;
335 	if (stcb) {
336 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
337 	} else {
338 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
339 	}
340 	if (inp) {
341 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
342 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
343 	} else {
344 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
345 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
346 	}
347 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
348 	if (inp->sctp_socket) {
349 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
350 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
351 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
352 	} else {
353 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
354 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
355 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
356 	}
357 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
358 	    SCTP_LOG_LOCK_EVENT,
359 	    from,
360 	    sctp_clog.x.misc.log1,
361 	    sctp_clog.x.misc.log2,
362 	    sctp_clog.x.misc.log3,
363 	    sctp_clog.x.misc.log4);
364 
365 }
366 
367 void
368 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
369 {
370 	struct sctp_cwnd_log sctp_clog;
371 
372 	memset(&sctp_clog, 0, sizeof(sctp_clog));
373 	sctp_clog.x.cwnd.net = net;
374 	sctp_clog.x.cwnd.cwnd_new_value = error;
375 	sctp_clog.x.cwnd.inflight = net->flight_size;
376 	sctp_clog.x.cwnd.cwnd_augment = burst;
377 	if (stcb->asoc.send_queue_cnt > 255)
378 		sctp_clog.x.cwnd.cnt_in_send = 255;
379 	else
380 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
381 	if (stcb->asoc.stream_queue_cnt > 255)
382 		sctp_clog.x.cwnd.cnt_in_str = 255;
383 	else
384 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
385 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
386 	    SCTP_LOG_EVENT_MAXBURST,
387 	    from,
388 	    sctp_clog.x.misc.log1,
389 	    sctp_clog.x.misc.log2,
390 	    sctp_clog.x.misc.log3,
391 	    sctp_clog.x.misc.log4);
392 
393 }
394 
395 void
396 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
397 {
398 	struct sctp_cwnd_log sctp_clog;
399 
400 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
401 	sctp_clog.x.rwnd.send_size = snd_size;
402 	sctp_clog.x.rwnd.overhead = overhead;
403 	sctp_clog.x.rwnd.new_rwnd = 0;
404 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
405 	    SCTP_LOG_EVENT_RWND,
406 	    from,
407 	    sctp_clog.x.misc.log1,
408 	    sctp_clog.x.misc.log2,
409 	    sctp_clog.x.misc.log3,
410 	    sctp_clog.x.misc.log4);
411 }
412 
413 void
414 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
415 {
416 	struct sctp_cwnd_log sctp_clog;
417 
418 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
419 	sctp_clog.x.rwnd.send_size = flight_size;
420 	sctp_clog.x.rwnd.overhead = overhead;
421 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
422 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
423 	    SCTP_LOG_EVENT_RWND,
424 	    from,
425 	    sctp_clog.x.misc.log1,
426 	    sctp_clog.x.misc.log2,
427 	    sctp_clog.x.misc.log3,
428 	    sctp_clog.x.misc.log4);
429 }
430 
431 void
432 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
433 {
434 	struct sctp_cwnd_log sctp_clog;
435 
436 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
437 	sctp_clog.x.mbcnt.size_change = book;
438 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
439 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
440 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
441 	    SCTP_LOG_EVENT_MBCNT,
442 	    from,
443 	    sctp_clog.x.misc.log1,
444 	    sctp_clog.x.misc.log2,
445 	    sctp_clog.x.misc.log3,
446 	    sctp_clog.x.misc.log4);
447 
448 }
449 
450 void
451 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
452 {
453 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
454 	    SCTP_LOG_MISC_EVENT,
455 	    from,
456 	    a, b, c, d);
457 }
458 
459 void
460 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
461 {
462 	struct sctp_cwnd_log sctp_clog;
463 
464 	sctp_clog.x.wake.stcb = (void *)stcb;
465 	sctp_clog.x.wake.wake_cnt = wake_cnt;
466 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
467 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
468 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
469 
470 	if (stcb->asoc.stream_queue_cnt < 0xff)
471 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
472 	else
473 		sctp_clog.x.wake.stream_qcnt = 0xff;
474 
475 	if (stcb->asoc.chunks_on_out_queue < 0xff)
476 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
477 	else
478 		sctp_clog.x.wake.chunks_on_oque = 0xff;
479 
480 	sctp_clog.x.wake.sctpflags = 0;
481 	/* set in the defered mode stuff */
482 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
483 		sctp_clog.x.wake.sctpflags |= 1;
484 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
485 		sctp_clog.x.wake.sctpflags |= 2;
486 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
487 		sctp_clog.x.wake.sctpflags |= 4;
488 	/* what about the sb */
489 	if (stcb->sctp_socket) {
490 		struct socket *so = stcb->sctp_socket;
491 
492 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
493 	} else {
494 		sctp_clog.x.wake.sbflags = 0xff;
495 	}
496 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
497 	    SCTP_LOG_EVENT_WAKE,
498 	    from,
499 	    sctp_clog.x.misc.log1,
500 	    sctp_clog.x.misc.log2,
501 	    sctp_clog.x.misc.log3,
502 	    sctp_clog.x.misc.log4);
503 
504 }
505 
506 void
507 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
508 {
509 	struct sctp_cwnd_log sctp_clog;
510 
511 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
512 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
513 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
514 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
515 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
516 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
517 	sctp_clog.x.blk.sndlen = sendlen;
518 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
519 	    SCTP_LOG_EVENT_BLOCK,
520 	    from,
521 	    sctp_clog.x.misc.log1,
522 	    sctp_clog.x.misc.log2,
523 	    sctp_clog.x.misc.log3,
524 	    sctp_clog.x.misc.log4);
525 
526 }
527 
/*
 * Copy the trace log out to user space.  With KTR-based tracing the
 * records are retrieved via ktrdump instead, so this is a no-op stub
 * that always reports success.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
534 
535 #ifdef SCTP_AUDITING_ENABLED
536 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
537 static int sctp_audit_indx = 0;
538 
539 static
540 void
541 sctp_print_audit_report(void)
542 {
543 	int i;
544 	int cnt;
545 
546 	cnt = 0;
547 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
548 		if ((sctp_audit_data[i][0] == 0xe0) &&
549 		    (sctp_audit_data[i][1] == 0x01)) {
550 			cnt = 0;
551 			SCTP_PRINTF("\n");
552 		} else if (sctp_audit_data[i][0] == 0xf0) {
553 			cnt = 0;
554 			SCTP_PRINTF("\n");
555 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
556 		    (sctp_audit_data[i][1] == 0x01)) {
557 			SCTP_PRINTF("\n");
558 			cnt = 0;
559 		}
560 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
561 		    (uint32_t) sctp_audit_data[i][1]);
562 		cnt++;
563 		if ((cnt % 14) == 0)
564 			SCTP_PRINTF("\n");
565 	}
566 	for (i = 0; i < sctp_audit_indx; i++) {
567 		if ((sctp_audit_data[i][0] == 0xe0) &&
568 		    (sctp_audit_data[i][1] == 0x01)) {
569 			cnt = 0;
570 			SCTP_PRINTF("\n");
571 		} else if (sctp_audit_data[i][0] == 0xf0) {
572 			cnt = 0;
573 			SCTP_PRINTF("\n");
574 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
575 		    (sctp_audit_data[i][1] == 0x01)) {
576 			SCTP_PRINTF("\n");
577 			cnt = 0;
578 		}
579 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
580 		    (uint32_t) sctp_audit_data[i][1]);
581 		cnt++;
582 		if ((cnt % 14) == 0)
583 			SCTP_PRINTF("\n");
584 	}
585 	SCTP_PRINTF("\n");
586 }
587 
588 void
589 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
590     struct sctp_nets *net)
591 {
592 	int resend_cnt, tot_out, rep, tot_book_cnt;
593 	struct sctp_nets *lnet;
594 	struct sctp_tmit_chunk *chk;
595 
596 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
597 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
598 	sctp_audit_indx++;
599 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
600 		sctp_audit_indx = 0;
601 	}
602 	if (inp == NULL) {
603 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
604 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
605 		sctp_audit_indx++;
606 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
607 			sctp_audit_indx = 0;
608 		}
609 		return;
610 	}
611 	if (stcb == NULL) {
612 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
613 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
614 		sctp_audit_indx++;
615 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
616 			sctp_audit_indx = 0;
617 		}
618 		return;
619 	}
620 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
621 	sctp_audit_data[sctp_audit_indx][1] =
622 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
623 	sctp_audit_indx++;
624 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
625 		sctp_audit_indx = 0;
626 	}
627 	rep = 0;
628 	tot_book_cnt = 0;
629 	resend_cnt = tot_out = 0;
630 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
631 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
632 			resend_cnt++;
633 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
634 			tot_out += chk->book_size;
635 			tot_book_cnt++;
636 		}
637 	}
638 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
639 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
640 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
641 		sctp_audit_indx++;
642 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
643 			sctp_audit_indx = 0;
644 		}
645 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
646 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
647 		rep = 1;
648 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
649 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
650 		sctp_audit_data[sctp_audit_indx][1] =
651 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
652 		sctp_audit_indx++;
653 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
654 			sctp_audit_indx = 0;
655 		}
656 	}
657 	if (tot_out != stcb->asoc.total_flight) {
658 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
659 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
660 		sctp_audit_indx++;
661 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
662 			sctp_audit_indx = 0;
663 		}
664 		rep = 1;
665 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
666 		    (int)stcb->asoc.total_flight);
667 		stcb->asoc.total_flight = tot_out;
668 	}
669 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
670 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
671 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
672 		sctp_audit_indx++;
673 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
674 			sctp_audit_indx = 0;
675 		}
676 		rep = 1;
677 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
678 
679 		stcb->asoc.total_flight_count = tot_book_cnt;
680 	}
681 	tot_out = 0;
682 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
683 		tot_out += lnet->flight_size;
684 	}
685 	if (tot_out != stcb->asoc.total_flight) {
686 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
687 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
688 		sctp_audit_indx++;
689 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
690 			sctp_audit_indx = 0;
691 		}
692 		rep = 1;
693 		SCTP_PRINTF("real flight:%d net total was %d\n",
694 		    stcb->asoc.total_flight, tot_out);
695 		/* now corrective action */
696 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
697 
698 			tot_out = 0;
699 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
700 				if ((chk->whoTo == lnet) &&
701 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
702 					tot_out += chk->book_size;
703 				}
704 			}
705 			if (lnet->flight_size != tot_out) {
706 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
707 				    (uint32_t) lnet, lnet->flight_size,
708 				    tot_out);
709 				lnet->flight_size = tot_out;
710 			}
711 		}
712 	}
713 	if (rep) {
714 		sctp_print_audit_report();
715 	}
716 }
717 
718 void
719 sctp_audit_log(uint8_t ev, uint8_t fd)
720 {
721 
722 	sctp_audit_data[sctp_audit_indx][0] = ev;
723 	sctp_audit_data[sctp_audit_indx][1] = fd;
724 	sctp_audit_indx++;
725 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
726 		sctp_audit_indx = 0;
727 	}
728 }
729 
730 #endif
731 
/*
 * a list of sizes based on typical mtu's, used only if next hop size not
 * returned.  Entries are in ascending order.  NOTE: the entry count must
 * stay equal to NUMBER_OF_MTU_SIZES (18), which find_next_best_mtu()
 * uses as its loop bound.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
756 
757 void
758 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
759 {
760 	struct sctp_association *asoc;
761 	struct sctp_nets *net;
762 
763 	asoc = &stcb->asoc;
764 
765 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
766 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
767 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
768 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
769 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
770 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
771 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
772 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
773 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
774 	}
775 }
776 
777 int
778 find_next_best_mtu(int totsz)
779 {
780 	int i, perfer;
781 
782 	/*
783 	 * if we are in here we must find the next best fit based on the
784 	 * size of the dg that failed to be sent.
785 	 */
786 	perfer = 0;
787 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
788 		if (totsz < sctp_mtu_sizes[i]) {
789 			perfer = i - 1;
790 			if (perfer < 0)
791 				perfer = 0;
792 			break;
793 		}
794 	}
795 	return (sctp_mtu_sizes[perfer]);
796 }
797 
/*
 * Refill the endpoint's random-number store by HMACing the endpoint's
 * random seed with a monotonically increasing counter, and reset the
 * read position (store_at) to the start.  Output lands in
 * m->random_store; consumed by sctp_select_initial_TSN().
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	/* HMAC(key = random_numbers, msg = random_counter) -> random_store */
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
816 
/*
 * Hand out the next 32-bit value from the endpoint's random store,
 * refilling the store when it wraps.  Multiple callers may race; the
 * store position is claimed with an atomic compare-and-set retry loop.
 * In debug mode (initial_sequence_debug != 0) a simple incrementing
 * counter is returned instead, for reproducible runs.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/* Claim sizeof(uint32_t) bytes of the store; wrap before the tail. */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		/* Lost the race with another consumer; try again. */
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/*
	 * NOTE(review): reads the claimed bytes through a uint32_t pointer;
	 * assumes random_store is suitably aligned for that — confirm.
	 */
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
854 
855 uint32_t
856 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
857 {
858 	u_long x, not_done;
859 	struct timeval now;
860 
861 	(void)SCTP_GETTIME_TIMEVAL(&now);
862 	not_done = 1;
863 	while (not_done) {
864 		x = sctp_select_initial_TSN(&inp->sctp_ep);
865 		if (x == 0) {
866 			/* we never use 0 */
867 			continue;
868 		}
869 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
870 			not_done = 0;
871 		}
872 	}
873 	return (x);
874 }
875 
876 int
877 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
878     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
879 {
880 	struct sctp_association *asoc;
881 
882 	/*
883 	 * Anything set to zero is taken care of by the allocation routine's
884 	 * bzero
885 	 */
886 
887 	/*
888 	 * Up front select what scoping to apply on addresses I tell my peer
889 	 * Not sure what to do with these right now, we will need to come up
890 	 * with a way to set them. We may need to pass them through from the
891 	 * caller in the sctp_aloc_assoc() function.
892 	 */
893 	int i;
894 
895 	asoc = &stcb->asoc;
896 	/* init all variables to a known value. */
897 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
898 	asoc->max_burst = m->sctp_ep.max_burst;
899 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
900 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
901 	asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
902 	/* EY Init nr_sack variable */
903 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
904 	/* JRS 5/21/07 - Init CMT PF variables */
905 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
906 	asoc->sctp_frag_point = m->sctp_frag_point;
907 #ifdef INET
908 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
909 #else
910 	asoc->default_tos = 0;
911 #endif
912 
913 #ifdef INET6
914 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
915 #else
916 	asoc->default_flowlabel = 0;
917 #endif
918 	asoc->sb_send_resv = 0;
919 	if (override_tag) {
920 		asoc->my_vtag = override_tag;
921 	} else {
922 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
923 	}
924 	/* Get the nonce tags */
925 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
926 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
927 	asoc->vrf_id = vrf_id;
928 
929 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
930 		asoc->hb_is_disabled = 1;
931 	else
932 		asoc->hb_is_disabled = 0;
933 
934 #ifdef SCTP_ASOCLOG_OF_TSNS
935 	asoc->tsn_in_at = 0;
936 	asoc->tsn_out_at = 0;
937 	asoc->tsn_in_wrapped = 0;
938 	asoc->tsn_out_wrapped = 0;
939 	asoc->cumack_log_at = 0;
940 	asoc->cumack_log_atsnt = 0;
941 #endif
942 #ifdef SCTP_FS_SPEC_LOG
943 	asoc->fs_index = 0;
944 #endif
945 	asoc->refcnt = 0;
946 	asoc->assoc_up_sent = 0;
947 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
948 	    sctp_select_initial_TSN(&m->sctp_ep);
949 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
950 	/* we are optimisitic here */
951 	asoc->peer_supports_pktdrop = 1;
952 	asoc->peer_supports_nat = 0;
953 	asoc->sent_queue_retran_cnt = 0;
954 
955 	/* for CMT */
956 	asoc->last_net_cmt_send_started = NULL;
957 
958 	/* This will need to be adjusted */
959 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
960 	asoc->last_acked_seq = asoc->init_seq_number - 1;
961 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
962 	asoc->asconf_seq_in = asoc->last_acked_seq;
963 
964 	/* here we are different, we hold the next one we expect */
965 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
966 
967 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
968 	asoc->initial_rto = m->sctp_ep.initial_rto;
969 
970 	asoc->max_init_times = m->sctp_ep.max_init_times;
971 	asoc->max_send_times = m->sctp_ep.max_send_times;
972 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
973 	asoc->free_chunk_cnt = 0;
974 
975 	asoc->iam_blocking = 0;
976 	/* ECN Nonce initialization */
977 	asoc->context = m->sctp_context;
978 	asoc->def_send = m->def_send;
979 	asoc->ecn_nonce_allowed = 0;
980 	asoc->receiver_nonce_sum = 1;
981 	asoc->nonce_sum_expect_base = 1;
982 	asoc->nonce_sum_check = 1;
983 	asoc->nonce_resync_tsn = 0;
984 	asoc->nonce_wait_for_ecne = 0;
985 	asoc->nonce_wait_tsn = 0;
986 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
987 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
988 	asoc->pr_sctp_cnt = 0;
989 	asoc->total_output_queue_size = 0;
990 
991 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
992 		struct in6pcb *inp6;
993 
994 		/* Its a V6 socket */
995 		inp6 = (struct in6pcb *)m;
996 		asoc->ipv6_addr_legal = 1;
997 		/* Now look at the binding flag to see if V4 will be legal */
998 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
999 			asoc->ipv4_addr_legal = 1;
1000 		} else {
1001 			/* V4 addresses are NOT legal on the association */
1002 			asoc->ipv4_addr_legal = 0;
1003 		}
1004 	} else {
1005 		/* Its a V4 socket, no - V6 */
1006 		asoc->ipv4_addr_legal = 1;
1007 		asoc->ipv6_addr_legal = 0;
1008 	}
1009 
1010 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1011 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1012 
1013 	asoc->smallest_mtu = m->sctp_frag_point;
1014 #ifdef SCTP_PRINT_FOR_B_AND_M
1015 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1016 	    asoc->smallest_mtu);
1017 #endif
1018 	asoc->minrto = m->sctp_ep.sctp_minrto;
1019 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1020 
1021 	asoc->locked_on_sending = NULL;
1022 	asoc->stream_locked_on = 0;
1023 	asoc->ecn_echo_cnt_onq = 0;
1024 	asoc->stream_locked = 0;
1025 
1026 	asoc->send_sack = 1;
1027 
1028 	LIST_INIT(&asoc->sctp_restricted_addrs);
1029 
1030 	TAILQ_INIT(&asoc->nets);
1031 	TAILQ_INIT(&asoc->pending_reply_queue);
1032 	TAILQ_INIT(&asoc->asconf_ack_sent);
1033 	/* Setup to fill the hb random cache at first HB */
1034 	asoc->hb_random_idx = 4;
1035 
1036 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1037 
1038 	/*
1039 	 * JRS - Pick the default congestion control module based on the
1040 	 * sysctl.
1041 	 */
1042 	switch (m->sctp_ep.sctp_default_cc_module) {
1043 		/* JRS - Standard TCP congestion control */
1044 	case SCTP_CC_RFC2581:
1045 		{
1046 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1047 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1048 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1049 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1050 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1051 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1052 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1053 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1054 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1055 			break;
1056 		}
1057 		/* JRS - High Speed TCP congestion control (Floyd) */
1058 	case SCTP_CC_HSTCP:
1059 		{
1060 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1061 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1063 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1064 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1065 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1066 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1067 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1068 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1069 			break;
1070 		}
1071 		/* JRS - HTCP congestion control */
1072 	case SCTP_CC_HTCP:
1073 		{
1074 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1075 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1077 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1078 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1079 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1080 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1081 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1082 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1083 			break;
1084 		}
1085 		/* JRS - By default, use RFC2581 */
1086 	default:
1087 		{
1088 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1089 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1091 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1092 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1093 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1094 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1095 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1096 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1097 			break;
1098 		}
1099 	}
1100 
1101 	/*
1102 	 * Now the stream parameters, here we allocate space for all streams
1103 	 * that we request by default.
1104 	 */
1105 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1106 	    m->sctp_ep.pre_open_stream_count;
1107 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1108 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1109 	    SCTP_M_STRMO);
1110 	if (asoc->strmout == NULL) {
1111 		/* big trouble no memory */
1112 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1113 		return (ENOMEM);
1114 	}
1115 	for (i = 0; i < asoc->streamoutcnt; i++) {
1116 		/*
1117 		 * inbound side must be set to 0xffff, also NOTE when we get
1118 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1119 		 * count (streamoutcnt) but first check if we sent to any of
1120 		 * the upper streams that were dropped (if some were). Those
1121 		 * that were dropped must be notified to the upper layer as
1122 		 * failed to send.
1123 		 */
1124 		asoc->strmout[i].next_sequence_sent = 0x0;
1125 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1126 		asoc->strmout[i].stream_no = i;
1127 		asoc->strmout[i].last_msg_incomplete = 0;
1128 		asoc->strmout[i].next_spoke.tqe_next = 0;
1129 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1130 	}
1131 	/* Now the mapping array */
1132 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1133 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1134 	    SCTP_M_MAP);
1135 	if (asoc->mapping_array == NULL) {
1136 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1137 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1138 		return (ENOMEM);
1139 	}
1140 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1141 	/* EY  - initialize the nr_mapping_array just like mapping array */
1142 	asoc->nr_mapping_array_size = SCTP_INITIAL_NR_MAPPING_ARRAY;
1143 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->nr_mapping_array_size,
1144 	    SCTP_M_MAP);
1145 	if (asoc->nr_mapping_array == NULL) {
1146 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1147 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1148 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1149 		return (ENOMEM);
1150 	}
1151 	memset(asoc->nr_mapping_array, 0, asoc->nr_mapping_array_size);
1152 
1153 	/* Now the init of the other outqueues */
1154 	TAILQ_INIT(&asoc->free_chunks);
1155 	TAILQ_INIT(&asoc->out_wheel);
1156 	TAILQ_INIT(&asoc->control_send_queue);
1157 	TAILQ_INIT(&asoc->asconf_send_queue);
1158 	TAILQ_INIT(&asoc->send_queue);
1159 	TAILQ_INIT(&asoc->sent_queue);
1160 	TAILQ_INIT(&asoc->reasmqueue);
1161 	TAILQ_INIT(&asoc->resetHead);
1162 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1163 	TAILQ_INIT(&asoc->asconf_queue);
1164 	/* authentication fields */
1165 	asoc->authinfo.random = NULL;
1166 	asoc->authinfo.active_keyid = 0;
1167 	asoc->authinfo.assoc_key = NULL;
1168 	asoc->authinfo.assoc_keyid = 0;
1169 	asoc->authinfo.recv_key = NULL;
1170 	asoc->authinfo.recv_keyid = 0;
1171 	LIST_INIT(&asoc->shared_keys);
1172 	asoc->marked_retrans = 0;
1173 	asoc->timoinit = 0;
1174 	asoc->timodata = 0;
1175 	asoc->timosack = 0;
1176 	asoc->timoshutdown = 0;
1177 	asoc->timoheartbeat = 0;
1178 	asoc->timocookie = 0;
1179 	asoc->timoshutdownack = 0;
1180 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1181 	asoc->discontinuity_time = asoc->start_time;
1182 	/*
1183 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1184 	 * freed later whe the association is freed.
1185 	 */
1186 	return (0);
1187 }
1188 
1189 int
1190 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1191 {
1192 	/* mapping array needs to grow */
1193 	uint8_t *new_array;
1194 	uint32_t new_size;
1195 
1196 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1197 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1198 	if (new_array == NULL) {
1199 		/* can't get more, forget it */
1200 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1201 		    new_size);
1202 		return (-1);
1203 	}
1204 	memset(new_array, 0, new_size);
1205 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1206 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1207 	asoc->mapping_array = new_array;
1208 	asoc->mapping_array_size = new_size;
1209 	if (asoc->peer_supports_nr_sack) {
1210 		new_size = asoc->nr_mapping_array_size + ((needed + 7) / 8 + SCTP_NR_MAPPING_ARRAY_INCR);
1211 		SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1212 		if (new_array == NULL) {
1213 			/* can't get more, forget it */
1214 			SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1215 			    new_size);
1216 			return (-1);
1217 		}
1218 		memset(new_array, 0, new_size);
1219 		memcpy(new_array, asoc->nr_mapping_array, asoc->nr_mapping_array_size);
1220 		SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1221 		asoc->nr_mapping_array = new_array;
1222 		asoc->nr_mapping_array_size = new_size;
1223 	}
1224 	return (0);
1225 }
1226 
1227 
1228 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Run one queued iterator to completion: walk the global endpoint list,
 * filter endpoints on the iterator's pcb flags/features, and invoke the
 * caller-supplied callbacks: function_inp once per matching endpoint,
 * function_assoc once per association in the desired state,
 * function_inp_end after the last association of an endpoint, and
 * function_atend when the whole walk is done.  The iterator structure
 * itself is freed here when the walk completes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	/*
	 * NOTE(review): presumably balances a reference taken when the
	 * iterator was scheduled/paused — verify against the enqueue path.
	 */
	if (it->inp) {
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/*
	 * NOTE(review): the write lock is dropped before the read lock is
	 * taken, leaving a brief unlocked window here — verify this
	 * downgrade-by-release is intentional/safe.
	 */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			/*
			 * Briefly drop and retake the iterator/inp locks so
			 * other waiters can make progress; the references
			 * taken above keep the inp and stcb alive across
			 * the unlocked gap.
			 */
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	/*
	 * NOTE(review): write lock taken and immediately released —
	 * presumably a barrier to drain in-flight writers before advancing;
	 * verify.
	 */
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1355 
1356 void
1357 sctp_iterator_worker(void)
1358 {
1359 	struct sctp_iterator *it = NULL;
1360 
1361 	/* This function is called with the WQ lock in place */
1362 
1363 	SCTP_BASE_INFO(iterator_running) = 1;
1364 again:
1365 	it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1366 	while (it) {
1367 		/* now lets work on this one */
1368 		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
1369 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1370 		sctp_iterator_work(it);
1371 		SCTP_IPI_ITERATOR_WQ_LOCK();
1372 		/* sa_ignore FREED_MEMORY */
1373 		it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1374 	}
1375 	if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
1376 		goto again;
1377 	}
1378 	SCTP_BASE_INFO(iterator_running) = 0;
1379 	return;
1380 }
1381 
1382 #endif
1383 
1384 
1385 static void
1386 sctp_handle_addr_wq(void)
1387 {
1388 	/* deal with the ADDR wq from the rtsock calls */
1389 	struct sctp_laddr *wi;
1390 	struct sctp_asconf_iterator *asc;
1391 
1392 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1393 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1394 	if (asc == NULL) {
1395 		/* Try later, no memory */
1396 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1397 		    (struct sctp_inpcb *)NULL,
1398 		    (struct sctp_tcb *)NULL,
1399 		    (struct sctp_nets *)NULL);
1400 		return;
1401 	}
1402 	LIST_INIT(&asc->list_of_work);
1403 	asc->cnt = 0;
1404 	SCTP_IPI_ITERATOR_WQ_LOCK();
1405 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1406 	while (wi != NULL) {
1407 		LIST_REMOVE(wi, sctp_nxt_addr);
1408 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1409 		asc->cnt++;
1410 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1411 	}
1412 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1413 	if (asc->cnt == 0) {
1414 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1415 	} else {
1416 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1417 		    sctp_asconf_iterator_stcb,
1418 		    NULL,	/* No ep end for boundall */
1419 		    SCTP_PCB_FLAGS_BOUNDALL,
1420 		    SCTP_PCB_ANY_FEATURES,
1421 		    SCTP_ASOC_ANY_STATE,
1422 		    (void *)asc, 0,
1423 		    sctp_asconf_iterator_end, NULL, 0);
1424 	}
1425 }
1426 
/*
 * NOTE(review): file-scope scratch variables written by
 * sctp_timeout_handler() below.  Being shared globals, concurrently
 * firing timers can clobber each other's values — presumably kept for
 * debugging visibility; verify before relying on their contents.
 */
int retcode = 0;
int cur_oerr = 0;
1429 
/*
 * Central callout handler for every SCTP timer type.  't' is really a
 * struct sctp_timer; from it we recover the endpoint (inp), association
 * (stcb) and destination (net) the timer was armed against, validate that
 * they are still alive, take the necessary references and locks, and then
 * dispatch on tmr->type.  Each case either services the timer inline or
 * calls a per-type handler; a non-zero return from those handlers means
 * the tcb was freed, so we leave via out_decr without unlocking it.
 * tmr->stopped_from is updated at each early-bailout point — presumably a
 * post-mortem debugging aid; verify before relying on the exact codes.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;
	struct sctp_iterator *it = NULL;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
		it = (struct sctp_iterator *)inp;
		inp = NULL;
	}
	/*
	 * Remember the type now: the INPKILL/ASOCKILL paths below free the
	 * structures that presumably contain tmr, so the exit path logs
	 * this copy rather than tmr->type.
	 */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * If the socket is gone, service only the listed timer
		 * types (the ones still meaningful without a socket).
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	/* Hold a reference so the tcb cannot be freed while we examine it. */
	if (stcb) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	/* Timer is no longer active: release the references and bail. */
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	/* Lock the tcb and recheck that the association is still usable. */
	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		SCTP_STAT_INCR(sctps_timoiterator);
		sctp_iterator_timer(it);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		/* cur_oerr/retcode are file-scope scratch variables. */
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			int abort_flag;

			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
				sctp_sack_check(stcb, 0, 0, &abort_flag);

			/*
			 * EY if nr_sacks used then send an nr-sack , a sack
			 * otherwise
			 */
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
				sctp_send_nr_sack(stcb);
			else
				sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* Count reachable-but-unconfirmed destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			/* Rotate the endpoint's cookie secret keys. */
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_DIRECTLY_NOCMPSET);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* Common exit: the tcb (if any) is still locked at this point. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1916 
/*
 * Arm the timer of the given type.  The switch below selects, per timer
 * type, which sctp_timer structure is used (endpoint-, association- or
 * destination-scoped) and computes the timeout in ticks, then starts the
 * OS callout with sctp_timeout_handler() as the callback.  Returns
 * silently when a required argument is NULL for the requested type, when
 * the computed timeout or timer pointer is invalid, or when the timer is
 * already pending (an already-running timer is left unchanged).
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int to_ticks;
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may be started without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	to_ticks = 0;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			struct sctp_iterator *it;

			/* For this type the "inp" argument carries an iterator. */
			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
			to_ticks = SCTP_ITERATOR_TICKS;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				/* No RTT measured yet; fall back to the initial RTO. */
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp,
		 * usually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint8_t this_random;
			int cnt_of_unconf = 0;
			struct sctp_nets *lnet;

			/* Count destinations that are reachable but unconfirmed. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf) {
				/*
				 * NOTE(review): lnet is NULL here (the
				 * FOREACH above ran to completion), so the
				 * heartbeat is fired without a specific
				 * destination — confirm this is intended.
				 */
				net = lnet = NULL;
				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
			}
			if (stcb->asoc.hb_random_idx > 3) {
				/* Jitter pool exhausted; draw 4 fresh random bytes. */
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				memcpy(stcb->asoc.hb_random_values, &rndval,
				    sizeof(stcb->asoc.hb_random_values));
				stcb->asoc.hb_random_idx = 0;
			}
			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
			stcb->asoc.hb_random_idx++;
			stcb->asoc.hb_ect_randombit = 0;
			/*
			 * this_random will be 0 - 256 ms RTO is in ms.
			 */
			if ((stcb->asoc.hb_is_disabled) &&
			    (cnt_of_unconf == 0)) {
				return;
			}
			if (net) {
				int delay;

				delay = stcb->asoc.heart_beat_delay;
				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
						/* An unconfirmed address exists: no extra HB delay. */
						delay = 0;
					}
				}
				if (net->RTO == 0) {
					/* Never been checked */
					to_ticks = this_random + stcb->asoc.initial_rto + delay;
				} else {
					/* set rto_val to the ms */
					to_ticks = delay + net->RTO + this_random;
				}
			} else {
				if (cnt_of_unconf) {
					to_ticks = this_random + stcb->asoc.initial_rto;
				} else {
					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
				}
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &stcb->asoc.hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was complete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here usually about 60
		 * minutes.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU usually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (inp == NULL)) {
			return;
		}
		if (net == NULL) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_EARLYFR:
		{
			unsigned int msec;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			if (net->flight_size > net->cwnd) {
				/* no need to start */
				return;
			}
			SCTP_STAT_INCR(sctps_earlyfrstart);
			if (net->lastsa == 0) {
				/* Hmm no rtt estimate yet? */
				msec = stcb->asoc.initial_rto >> 2;
			} else {
				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
			}
			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
				/* Enforce the configured floor for early FR. */
				if (msec < SCTP_MINFR_MSEC_FLOOR) {
					msec = SCTP_MINFR_MSEC_FLOOR;
				}
			}
			to_ticks = MSEC_TO_TICKS(msec);
			tmr = &net->fr_timer;
		}
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		/* Note: this type requires net to be NULL. */
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		return;
		/* NOTREACHED */
		break;
	};
	/* Sanity: every handled case must have produced a timer and timeout. */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __FUNCTION__, t_type, to_ticks, tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2280 
/*
 * Stop (disarm) the timer of the given type, selecting the same
 * sctp_timer structure that sctp_timer_start() uses for that type.
 * Because some timer structures are shared between timer types (e.g.
 * strreset/asockill, signature_change/inpkill), a timer whose recorded
 * type differs from t_type is deliberately left running.  'from' is
 * recorded in tmr->stopped_from for debugging who stopped the timer.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may be stopped without an inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			struct sctp_iterator *it;

			/* For this type the "inp" argument carries an iterator. */
			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the per-association count of running SEND timers in sync. */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2452 
2453 uint32_t
2454 sctp_calculate_len(struct mbuf *m)
2455 {
2456 	uint32_t tlen = 0;
2457 	struct mbuf *at;
2458 
2459 	at = m;
2460 	while (at) {
2461 		tlen += SCTP_BUF_LEN(at);
2462 		at = SCTP_BUF_NEXT(at);
2463 	}
2464 	return (tlen);
2465 }
2466 
2467 void
2468 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2469     struct sctp_association *asoc, uint32_t mtu)
2470 {
2471 	/*
2472 	 * Reset the P-MTU size on this association, this involves changing
2473 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2474 	 * allow the DF flag to be cleared.
2475 	 */
2476 	struct sctp_tmit_chunk *chk;
2477 	unsigned int eff_mtu, ovh;
2478 
2479 #ifdef SCTP_PRINT_FOR_B_AND_M
2480 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2481 	    inp, asoc, mtu);
2482 #endif
2483 	asoc->smallest_mtu = mtu;
2484 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2485 		ovh = SCTP_MIN_OVERHEAD;
2486 	} else {
2487 		ovh = SCTP_MIN_V4_OVERHEAD;
2488 	}
2489 	eff_mtu = mtu - ovh;
2490 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2491 
2492 		if (chk->send_size > eff_mtu) {
2493 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2494 		}
2495 	}
2496 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2497 		if (chk->send_size > eff_mtu) {
2498 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2499 		}
2500 	}
2501 }
2502 
2503 
2504 /*
2505  * given an association and starting time of the current RTT period return
2506  * RTO in number of msecs net should point to the current network
2507  */
2508 uint32_t
2509 sctp_calculate_rto(struct sctp_tcb *stcb,
2510     struct sctp_association *asoc,
2511     struct sctp_nets *net,
2512     struct timeval *told,
2513     int safe)
2514 {
2515 	/*-
2516 	 * given an association and the starting time of the current RTT
2517 	 * period (in value1/value2) return RTO in number of msecs.
2518 	 */
2519 	int calc_time = 0;
2520 	int o_calctime;
2521 	uint32_t new_rto = 0;
2522 	int first_measure = 0;
2523 	struct timeval now, then, *old;
2524 
2525 	/* Copy it out for sparc64 */
2526 	if (safe == sctp_align_unsafe_makecopy) {
2527 		old = &then;
2528 		memcpy(&then, told, sizeof(struct timeval));
2529 	} else if (safe == sctp_align_safe_nocopy) {
2530 		old = told;
2531 	} else {
2532 		/* error */
2533 		SCTP_PRINTF("Huh, bad rto calc call\n");
2534 		return (0);
2535 	}
2536 	/************************/
2537 	/* 1. calculate new RTT */
2538 	/************************/
2539 	/* get the current time */
2540 	(void)SCTP_GETTIME_TIMEVAL(&now);
2541 	/* compute the RTT value */
2542 	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
2543 		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
2544 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2545 			calc_time += (((u_long)now.tv_usec -
2546 			    (u_long)old->tv_usec) / 1000);
2547 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2548 			/* Borrow 1,000ms from current calculation */
2549 			calc_time -= 1000;
2550 			/* Add in the slop over */
2551 			calc_time += ((int)now.tv_usec / 1000);
2552 			/* Add in the pre-second ms's */
2553 			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
2554 		}
2555 	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
2556 		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
2557 			calc_time = ((u_long)now.tv_usec -
2558 			    (u_long)old->tv_usec) / 1000;
2559 		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
2560 			/* impossible .. garbage in nothing out */
2561 			goto calc_rto;
2562 		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
2563 			/*
2564 			 * We have to have 1 usec :-D this must be the
2565 			 * loopback.
2566 			 */
2567 			calc_time = 1;
2568 		} else {
2569 			/* impossible .. garbage in nothing out */
2570 			goto calc_rto;
2571 		}
2572 	} else {
2573 		/* Clock wrapped? */
2574 		goto calc_rto;
2575 	}
2576 	/***************************/
2577 	/* 2. update RTTVAR & SRTT */
2578 	/***************************/
2579 	net->rtt = o_calctime = calc_time;
2580 	/* this is Van Jacobson's integer version */
2581 	if (net->RTO_measured) {
2582 		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
2583 								 * shift=3 */
2584 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2585 			rto_logging(net, SCTP_LOG_RTTVAR);
2586 		}
2587 		net->prev_rtt = o_calctime;
2588 		net->lastsa += calc_time;	/* add 7/8th into sa when
2589 						 * shift=3 */
2590 		if (calc_time < 0) {
2591 			calc_time = -calc_time;
2592 		}
2593 		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
2594 									 * VAR shift=2 */
2595 		net->lastsv += calc_time;
2596 		if (net->lastsv == 0) {
2597 			net->lastsv = SCTP_CLOCK_GRANULARITY;
2598 		}
2599 	} else {
2600 		/* First RTO measurment */
2601 		net->RTO_measured = 1;
2602 		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
2603 								 * shift=3 */
2604 		net->lastsv = calc_time;
2605 		if (net->lastsv == 0) {
2606 			net->lastsv = SCTP_CLOCK_GRANULARITY;
2607 		}
2608 		first_measure = 1;
2609 		net->prev_rtt = o_calctime;
2610 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2611 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2612 		}
2613 	}
2614 calc_rto:
2615 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2616 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2617 	    (stcb->asoc.sat_network_lockout == 0)) {
2618 		stcb->asoc.sat_network = 1;
2619 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2620 		stcb->asoc.sat_network = 0;
2621 		stcb->asoc.sat_network_lockout = 1;
2622 	}
2623 	/* bound it, per C6/C7 in Section 5.3.1 */
2624 	if (new_rto < stcb->asoc.minrto) {
2625 		new_rto = stcb->asoc.minrto;
2626 	}
2627 	if (new_rto > stcb->asoc.maxrto) {
2628 		new_rto = stcb->asoc.maxrto;
2629 	}
2630 	/* we are now returning the RTO */
2631 	return (new_rto);
2632 }
2633 
2634 /*
2635  * return a pointer to a contiguous piece of data from the given mbuf chain
2636  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2637  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2638  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2639  */
2640 caddr_t
2641 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2642 {
2643 	uint32_t count;
2644 	uint8_t *ptr;
2645 
2646 	ptr = in_ptr;
2647 	if ((off < 0) || (len <= 0))
2648 		return (NULL);
2649 
2650 	/* find the desired start location */
2651 	while ((m != NULL) && (off > 0)) {
2652 		if (off < SCTP_BUF_LEN(m))
2653 			break;
2654 		off -= SCTP_BUF_LEN(m);
2655 		m = SCTP_BUF_NEXT(m);
2656 	}
2657 	if (m == NULL)
2658 		return (NULL);
2659 
2660 	/* is the current mbuf large enough (eg. contiguous)? */
2661 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2662 		return (mtod(m, caddr_t)+off);
2663 	} else {
2664 		/* else, it spans more than one mbuf, so save a temp copy... */
2665 		while ((m != NULL) && (len > 0)) {
2666 			count = min(SCTP_BUF_LEN(m) - off, len);
2667 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2668 			len -= count;
2669 			ptr += count;
2670 			off = 0;
2671 			m = SCTP_BUF_NEXT(m);
2672 		}
2673 		if ((m == NULL) && (len > 0))
2674 			return (NULL);
2675 		else
2676 			return ((caddr_t)in_ptr);
2677 	}
2678 }
2679 
2680 
2681 
2682 struct sctp_paramhdr *
2683 sctp_get_next_param(struct mbuf *m,
2684     int offset,
2685     struct sctp_paramhdr *pull,
2686     int pull_limit)
2687 {
2688 	/* This just provides a typed signature to Peter's Pull routine */
2689 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2690 	    (uint8_t *) pull));
2691 }
2692 
2693 
2694 int
2695 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2696 {
2697 	/*
2698 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2699 	 * padlen is > 3 this routine will fail.
2700 	 */
2701 	uint8_t *dp;
2702 	int i;
2703 
2704 	if (padlen > 3) {
2705 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2706 		return (ENOBUFS);
2707 	}
2708 	if (padlen <= M_TRAILINGSPACE(m)) {
2709 		/*
2710 		 * The easy way. We hope the majority of the time we hit
2711 		 * here :)
2712 		 */
2713 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2714 		SCTP_BUF_LEN(m) += padlen;
2715 	} else {
2716 		/* Hard way we must grow the mbuf */
2717 		struct mbuf *tmp;
2718 
2719 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2720 		if (tmp == NULL) {
2721 			/* Out of space GAK! we are in big trouble. */
2722 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2723 			return (ENOSPC);
2724 		}
2725 		/* setup and insert in middle */
2726 		SCTP_BUF_LEN(tmp) = padlen;
2727 		SCTP_BUF_NEXT(tmp) = NULL;
2728 		SCTP_BUF_NEXT(m) = tmp;
2729 		dp = mtod(tmp, uint8_t *);
2730 	}
2731 	/* zero out the pad */
2732 	for (i = 0; i < padlen; i++) {
2733 		*dp = 0;
2734 		dp++;
2735 	}
2736 	return (0);
2737 }
2738 
2739 int
2740 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2741 {
2742 	/* find the last mbuf in chain and pad it */
2743 	struct mbuf *m_at;
2744 
2745 	m_at = m;
2746 	if (last_mbuf) {
2747 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2748 	} else {
2749 		while (m_at) {
2750 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2751 				return (sctp_add_pad_tombuf(m_at, padval));
2752 			}
2753 			m_at = SCTP_BUF_NEXT(m_at);
2754 		}
2755 	}
2756 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2757 	return (EFAULT);
2758 }
2759 
/* Debug counter: times sctp_notify_assoc_change() woke socket sleepers. */
int sctp_asoc_change_wake = 0;
2761 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification ('event' with error code
 * 'error') to the application via the socket's receive queue.  For
 * TCP-model / connected-UDP sockets, a COMM_LOST or CANT_STR_ASSOC also
 * sets so_error (ECONNREFUSED while still in COOKIE_WAIT, ECONNRESET
 * otherwise) and wakes any sleepers.  On platforms that need the socket
 * lock (__APPLE__ / SCTP_SO_LOCK_TESTING), the tcb lock is dropped and
 * re-taken around taking the socket lock, holding a refcnt so the asoc
 * cannot vanish; 'so_locked' says whether the caller already holds it.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock-order dance: hold a ref, drop the tcb lock,
			 * take the socket lock, re-take the tcb lock.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* Socket closed while we were unlocked; bail. */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Fill in the sctp_assoc_change notification record. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Same lock-order dance as above. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2879 
2880 static void
2881 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2882     struct sockaddr *sa, uint32_t error)
2883 {
2884 	struct mbuf *m_notify;
2885 	struct sctp_paddr_change *spc;
2886 	struct sctp_queued_to_read *control;
2887 
2888 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2889 		/* event not enabled */
2890 		return;
2891 	}
2892 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2893 	if (m_notify == NULL)
2894 		return;
2895 	SCTP_BUF_LEN(m_notify) = 0;
2896 	spc = mtod(m_notify, struct sctp_paddr_change *);
2897 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2898 	spc->spc_flags = 0;
2899 	spc->spc_length = sizeof(struct sctp_paddr_change);
2900 	switch (sa->sa_family) {
2901 	case AF_INET:
2902 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2903 		break;
2904 #ifdef INET6
2905 	case AF_INET6:
2906 		{
2907 			struct sockaddr_in6 *sin6;
2908 
2909 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2910 
2911 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2912 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2913 				if (sin6->sin6_scope_id == 0) {
2914 					/* recover scope_id for user */
2915 					(void)sa6_recoverscope(sin6);
2916 				} else {
2917 					/* clear embedded scope_id for user */
2918 					in6_clearscope(&sin6->sin6_addr);
2919 				}
2920 			}
2921 			break;
2922 		}
2923 #endif
2924 	default:
2925 		/* TSNH */
2926 		break;
2927 	}
2928 	spc->spc_state = state;
2929 	spc->spc_error = error;
2930 	spc->spc_assoc_id = sctp_get_associd(stcb);
2931 
2932 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2933 	SCTP_BUF_NEXT(m_notify) = NULL;
2934 
2935 	/* append to socket */
2936 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2937 	    0, 0, 0, 0, 0, 0,
2938 	    m_notify);
2939 	if (control == NULL) {
2940 		/* no memory */
2941 		sctp_m_freem(m_notify);
2942 		return;
2943 	}
2944 	control->length = SCTP_BUF_LEN(m_notify);
2945 	control->spec_flags = M_NOTIFICATION;
2946 	/* not that we need this */
2947 	control->tail_mbuf = m_notify;
2948 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2949 	    control,
2950 	    &stcb->sctp_socket->so_rcv, 1,
2951 	    SCTP_READ_LOCK_NOT_HELD,
2952 	    SCTP_SO_NOT_LOCKED);
2953 }
2954 
2955 
/*
 * Deliver an SCTP_SEND_FAILED notification to the application for a chunk
 * that was either never sent (SCTP_NOTIFY_DATAGRAM_UNSENT) or sent but not
 * acked (any other error code).  The chunk's data mbuf chain is stolen and
 * appended to the notification so the user gets the failed payload back.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * Reported length is the notification header plus the payload
	 * minus the SCTP DATA chunk header.
	 * NOTE(review): this subtraction is unconditional, but the actual
	 * m_adj() trim below only happens when send_size >= the chunk
	 * header size - if it is ever smaller, ssf_length would be wrong.
	 * Presumably send_size always includes the chunk header; confirm.
	 */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	/* Chain the (trimmed) payload behind the notification header. */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* Frees the whole chain, including the stolen payload. */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3037 
3038 
/*
 * Like sctp_notify_send_failed(), but for a message still sitting on a
 * stream output queue (struct sctp_stream_queue_pending) that never made
 * it into a transmit chunk.  The pending data mbuf chain is stolen and
 * handed back to the application with the notification.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* No chunk header to subtract here; sp->length is raw user data. */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* part of the message already left; this is the tail */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* Chain the undelivered payload behind the notification header. */
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* Frees the whole chain, including the stolen payload. */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3111 
3112 
3113 
3114 static void
3115 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3116     uint32_t error)
3117 {
3118 	struct mbuf *m_notify;
3119 	struct sctp_adaptation_event *sai;
3120 	struct sctp_queued_to_read *control;
3121 
3122 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3123 		/* event not enabled */
3124 		return;
3125 	}
3126 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3127 	if (m_notify == NULL)
3128 		/* no space left */
3129 		return;
3130 	SCTP_BUF_LEN(m_notify) = 0;
3131 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3132 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3133 	sai->sai_flags = 0;
3134 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3135 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3136 	sai->sai_assoc_id = sctp_get_associd(stcb);
3137 
3138 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3139 	SCTP_BUF_NEXT(m_notify) = NULL;
3140 
3141 	/* append to socket */
3142 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3143 	    0, 0, 0, 0, 0, 0,
3144 	    m_notify);
3145 	if (control == NULL) {
3146 		/* no memory */
3147 		sctp_m_freem(m_notify);
3148 		return;
3149 	}
3150 	control->length = SCTP_BUF_LEN(m_notify);
3151 	control->spec_flags = M_NOTIFICATION;
3152 	/* not that we need this */
3153 	control->tail_mbuf = m_notify;
3154 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3155 	    control,
3156 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3157 }
3158 
3159 /* This always must be called with the read-queue LOCKED in the INP */
3160 static void
3161 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3162     uint32_t val, int so_locked
3163 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3164     SCTP_UNUSED
3165 #endif
3166 )
3167 {
3168 	struct mbuf *m_notify;
3169 	struct sctp_pdapi_event *pdapi;
3170 	struct sctp_queued_to_read *control;
3171 	struct sockbuf *sb;
3172 
3173 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3174 		/* event not enabled */
3175 		return;
3176 	}
3177 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3178 	if (m_notify == NULL)
3179 		/* no space left */
3180 		return;
3181 	SCTP_BUF_LEN(m_notify) = 0;
3182 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3183 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3184 	pdapi->pdapi_flags = 0;
3185 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3186 	pdapi->pdapi_indication = error;
3187 	pdapi->pdapi_stream = (val >> 16);
3188 	pdapi->pdapi_seq = (val & 0x0000ffff);
3189 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3190 
3191 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3192 	SCTP_BUF_NEXT(m_notify) = NULL;
3193 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3194 	    0, 0, 0, 0, 0, 0,
3195 	    m_notify);
3196 	if (control == NULL) {
3197 		/* no memory */
3198 		sctp_m_freem(m_notify);
3199 		return;
3200 	}
3201 	control->spec_flags = M_NOTIFICATION;
3202 	control->length = SCTP_BUF_LEN(m_notify);
3203 	/* not that we need this */
3204 	control->tail_mbuf = m_notify;
3205 	control->held_length = 0;
3206 	control->length = 0;
3207 	sb = &stcb->sctp_socket->so_rcv;
3208 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3209 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3210 	}
3211 	sctp_sballoc(stcb, sb, m_notify);
3212 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3213 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3214 	}
3215 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3216 	control->end_added = 1;
3217 	if (stcb->asoc.control_pdapi)
3218 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3219 	else {
3220 		/* we really should not see this case */
3221 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3222 	}
3223 	if (stcb->sctp_ep && stcb->sctp_socket) {
3224 		/* This should always be the case */
3225 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3226 		struct socket *so;
3227 
3228 		so = SCTP_INP_SO(stcb->sctp_ep);
3229 		if (!so_locked) {
3230 			atomic_add_int(&stcb->asoc.refcnt, 1);
3231 			SCTP_TCB_UNLOCK(stcb);
3232 			SCTP_SOCKET_LOCK(so, 1);
3233 			SCTP_TCB_LOCK(stcb);
3234 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3235 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3236 				SCTP_SOCKET_UNLOCK(so, 1);
3237 				return;
3238 			}
3239 		}
3240 #endif
3241 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3242 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3243 		if (!so_locked) {
3244 			SCTP_SOCKET_UNLOCK(so, 1);
3245 		}
3246 #endif
3247 	}
3248 }
3249 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification when a SHUTDOWN completes.
 * For TCP-model (and UDP connected-style) sockets it additionally marks
 * the socket as unable to send, waking up any blocked writers.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock-order dance: drop the TCB lock, take the socket
		 * lock, then re-take the TCB lock; the refcount bump keeps
		 * the TCB alive while unlocked.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3317 
3318 static void
3319 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3320     int so_locked
3321 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3322     SCTP_UNUSED
3323 #endif
3324 )
3325 {
3326 	struct mbuf *m_notify;
3327 	struct sctp_sender_dry_event *event;
3328 	struct sctp_queued_to_read *control;
3329 
3330 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3331 		/* event not enabled */
3332 		return;
3333 	}
3334 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3335 	if (m_notify == NULL) {
3336 		/* no space left */
3337 		return;
3338 	}
3339 	SCTP_BUF_LEN(m_notify) = 0;
3340 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3341 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3342 	event->sender_dry_flags = 0;
3343 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3344 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3345 
3346 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3347 	SCTP_BUF_NEXT(m_notify) = NULL;
3348 
3349 	/* append to socket */
3350 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3351 	    0, 0, 0, 0, 0, 0, m_notify);
3352 	if (control == NULL) {
3353 		/* no memory */
3354 		sctp_m_freem(m_notify);
3355 		return;
3356 	}
3357 	control->length = SCTP_BUF_LEN(m_notify);
3358 	control->spec_flags = M_NOTIFICATION;
3359 	/* not that we need this */
3360 	control->tail_mbuf = m_notify;
3361 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3362 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3363 }
3364 
3365 
3366 static void
3367 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3368 {
3369 	struct mbuf *m_notify;
3370 	struct sctp_queued_to_read *control;
3371 	struct sctp_stream_reset_event *strreset;
3372 	int len;
3373 
3374 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3375 		/* event not enabled */
3376 		return;
3377 	}
3378 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3379 	if (m_notify == NULL)
3380 		/* no space left */
3381 		return;
3382 	SCTP_BUF_LEN(m_notify) = 0;
3383 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3384 	if (len > M_TRAILINGSPACE(m_notify)) {
3385 		/* never enough room */
3386 		sctp_m_freem(m_notify);
3387 		return;
3388 	}
3389 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3390 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3391 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3392 	strreset->strreset_length = len;
3393 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3394 	strreset->strreset_list[0] = number_entries;
3395 
3396 	SCTP_BUF_LEN(m_notify) = len;
3397 	SCTP_BUF_NEXT(m_notify) = NULL;
3398 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3399 		/* no space */
3400 		sctp_m_freem(m_notify);
3401 		return;
3402 	}
3403 	/* append to socket */
3404 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3405 	    0, 0, 0, 0, 0, 0,
3406 	    m_notify);
3407 	if (control == NULL) {
3408 		/* no memory */
3409 		sctp_m_freem(m_notify);
3410 		return;
3411 	}
3412 	control->spec_flags = M_NOTIFICATION;
3413 	control->length = SCTP_BUF_LEN(m_notify);
3414 	/* not that we need this */
3415 	control->tail_mbuf = m_notify;
3416 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3417 	    control,
3418 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3419 }
3420 
3421 
3422 static void
3423 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3424     int number_entries, uint16_t * list, int flag)
3425 {
3426 	struct mbuf *m_notify;
3427 	struct sctp_queued_to_read *control;
3428 	struct sctp_stream_reset_event *strreset;
3429 	int len;
3430 
3431 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3432 		/* event not enabled */
3433 		return;
3434 	}
3435 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3436 	if (m_notify == NULL)
3437 		/* no space left */
3438 		return;
3439 	SCTP_BUF_LEN(m_notify) = 0;
3440 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3441 	if (len > M_TRAILINGSPACE(m_notify)) {
3442 		/* never enough room */
3443 		sctp_m_freem(m_notify);
3444 		return;
3445 	}
3446 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3447 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3448 	if (number_entries == 0) {
3449 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3450 	} else {
3451 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3452 	}
3453 	strreset->strreset_length = len;
3454 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3455 	if (number_entries) {
3456 		int i;
3457 
3458 		for (i = 0; i < number_entries; i++) {
3459 			strreset->strreset_list[i] = ntohs(list[i]);
3460 		}
3461 	}
3462 	SCTP_BUF_LEN(m_notify) = len;
3463 	SCTP_BUF_NEXT(m_notify) = NULL;
3464 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3465 		/* no space */
3466 		sctp_m_freem(m_notify);
3467 		return;
3468 	}
3469 	/* append to socket */
3470 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3471 	    0, 0, 0, 0, 0, 0,
3472 	    m_notify);
3473 	if (control == NULL) {
3474 		/* no memory */
3475 		sctp_m_freem(m_notify);
3476 		return;
3477 	}
3478 	control->spec_flags = M_NOTIFICATION;
3479 	control->length = SCTP_BUF_LEN(m_notify);
3480 	/* not that we need this */
3481 	control->tail_mbuf = m_notify;
3482 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3483 	    control,
3484 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3485 }
3486 
3487 
3488 void
3489 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3490     uint32_t error, void *data, int so_locked
3491 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3492     SCTP_UNUSED
3493 #endif
3494 )
3495 {
3496 	if ((stcb == NULL) ||
3497 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3498 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3499 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3500 		/* If the socket is gone we are out of here */
3501 		return;
3502 	}
3503 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3504 		return;
3505 	}
3506 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3507 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3508 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3509 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3510 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3511 			/* Don't report these in front states */
3512 			return;
3513 		}
3514 	}
3515 	switch (notification) {
3516 	case SCTP_NOTIFY_ASSOC_UP:
3517 		if (stcb->asoc.assoc_up_sent == 0) {
3518 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3519 			stcb->asoc.assoc_up_sent = 1;
3520 		}
3521 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3522 			sctp_notify_adaptation_layer(stcb, error);
3523 		}
3524 		if (stcb->asoc.peer_supports_auth == 0) {
3525 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3526 			    NULL, so_locked);
3527 		}
3528 		break;
3529 	case SCTP_NOTIFY_ASSOC_DOWN:
3530 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3531 		break;
3532 	case SCTP_NOTIFY_INTERFACE_DOWN:
3533 		{
3534 			struct sctp_nets *net;
3535 
3536 			net = (struct sctp_nets *)data;
3537 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3538 			    (struct sockaddr *)&net->ro._l_addr, error);
3539 			break;
3540 		}
3541 	case SCTP_NOTIFY_INTERFACE_UP:
3542 		{
3543 			struct sctp_nets *net;
3544 
3545 			net = (struct sctp_nets *)data;
3546 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3547 			    (struct sockaddr *)&net->ro._l_addr, error);
3548 			break;
3549 		}
3550 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3551 		{
3552 			struct sctp_nets *net;
3553 
3554 			net = (struct sctp_nets *)data;
3555 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3556 			    (struct sockaddr *)&net->ro._l_addr, error);
3557 			break;
3558 		}
3559 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3560 		sctp_notify_send_failed2(stcb, error,
3561 		    (struct sctp_stream_queue_pending *)data, so_locked);
3562 		break;
3563 	case SCTP_NOTIFY_DG_FAIL:
3564 		sctp_notify_send_failed(stcb, error,
3565 		    (struct sctp_tmit_chunk *)data, so_locked);
3566 		break;
3567 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3568 		{
3569 			uint32_t val;
3570 
3571 			val = *((uint32_t *) data);
3572 
3573 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3574 			break;
3575 		}
3576 	case SCTP_NOTIFY_STRDATA_ERR:
3577 		break;
3578 	case SCTP_NOTIFY_ASSOC_ABORTED:
3579 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3580 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3581 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3582 		} else {
3583 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3584 		}
3585 		break;
3586 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3587 		break;
3588 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3589 		break;
3590 	case SCTP_NOTIFY_ASSOC_RESTART:
3591 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3592 		if (stcb->asoc.peer_supports_auth == 0) {
3593 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3594 			    NULL, so_locked);
3595 		}
3596 		break;
3597 	case SCTP_NOTIFY_HB_RESP:
3598 		break;
3599 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3600 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3601 		break;
3602 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3603 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3604 		break;
3605 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3606 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3607 		break;
3608 
3609 	case SCTP_NOTIFY_STR_RESET_SEND:
3610 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3611 		break;
3612 	case SCTP_NOTIFY_STR_RESET_RECV:
3613 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3614 		break;
3615 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3616 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3617 		break;
3618 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3619 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3620 		break;
3621 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3622 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3623 		    error);
3624 		break;
3625 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3626 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3627 		    error);
3628 		break;
3629 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3630 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3631 		    error);
3632 		break;
3633 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3634 		break;
3635 	case SCTP_NOTIFY_ASCONF_FAILED:
3636 		break;
3637 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3638 		sctp_notify_shutdown_event(stcb);
3639 		break;
3640 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3641 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3642 		    (uint16_t) (uintptr_t) data,
3643 		    so_locked);
3644 		break;
3645 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3646 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3647 		    (uint16_t) (uintptr_t) data,
3648 		    so_locked);
3649 		break;
3650 	case SCTP_NOTIFY_NO_PEER_AUTH:
3651 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3652 		    (uint16_t) (uintptr_t) data,
3653 		    so_locked);
3654 		break;
3655 	case SCTP_NOTIFY_SENDER_DRY:
3656 		sctp_notify_sender_dry_event(stcb, so_locked);
3657 		break;
3658 	default:
3659 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3660 		    __FUNCTION__, notification, notification);
3661 		break;
3662 	}			/* end switch */
3663 }
3664 
3665 void
3666 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3667 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3668     SCTP_UNUSED
3669 #endif
3670 )
3671 {
3672 	struct sctp_association *asoc;
3673 	struct sctp_stream_out *outs;
3674 	struct sctp_tmit_chunk *chk;
3675 	struct sctp_stream_queue_pending *sp;
3676 	int i;
3677 
3678 	asoc = &stcb->asoc;
3679 
3680 	if (stcb == NULL) {
3681 		return;
3682 	}
3683 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3684 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3685 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3686 		return;
3687 	}
3688 	/* now through all the gunk freeing chunks */
3689 	if (holds_lock == 0) {
3690 		SCTP_TCB_SEND_LOCK(stcb);
3691 	}
3692 	/* sent queue SHOULD be empty */
3693 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3694 		chk = TAILQ_FIRST(&asoc->sent_queue);
3695 		while (chk) {
3696 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3697 			asoc->sent_queue_cnt--;
3698 			if (chk->data != NULL) {
3699 				sctp_free_bufspace(stcb, asoc, chk, 1);
3700 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3701 				    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3702 				if (chk->data) {
3703 					sctp_m_freem(chk->data);
3704 					chk->data = NULL;
3705 				}
3706 			}
3707 			sctp_free_a_chunk(stcb, chk);
3708 			/* sa_ignore FREED_MEMORY */
3709 			chk = TAILQ_FIRST(&asoc->sent_queue);
3710 		}
3711 	}
3712 	/* pending send queue SHOULD be empty */
3713 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3714 		chk = TAILQ_FIRST(&asoc->send_queue);
3715 		while (chk) {
3716 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3717 			asoc->send_queue_cnt--;
3718 			if (chk->data != NULL) {
3719 				sctp_free_bufspace(stcb, asoc, chk, 1);
3720 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3721 				    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3722 				if (chk->data) {
3723 					sctp_m_freem(chk->data);
3724 					chk->data = NULL;
3725 				}
3726 			}
3727 			sctp_free_a_chunk(stcb, chk);
3728 			/* sa_ignore FREED_MEMORY */
3729 			chk = TAILQ_FIRST(&asoc->send_queue);
3730 		}
3731 	}
3732 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3733 		/* For each stream */
3734 		outs = &stcb->asoc.strmout[i];
3735 		/* clean up any sends there */
3736 		stcb->asoc.locked_on_sending = NULL;
3737 		sp = TAILQ_FIRST(&outs->outqueue);
3738 		while (sp) {
3739 			stcb->asoc.stream_queue_cnt--;
3740 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3741 			sctp_free_spbufspace(stcb, asoc, sp);
3742 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3743 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3744 			if (sp->data) {
3745 				sctp_m_freem(sp->data);
3746 				sp->data = NULL;
3747 			}
3748 			if (sp->net)
3749 				sctp_free_remote_addr(sp->net);
3750 			sp->net = NULL;
3751 			/* Free the chunk */
3752 			sctp_free_a_strmoq(stcb, sp);
3753 			/* sa_ignore FREED_MEMORY */
3754 			sp = TAILQ_FIRST(&outs->outqueue);
3755 		}
3756 	}
3757 
3758 	if (holds_lock == 0) {
3759 		SCTP_TCB_SEND_UNLOCK(stcb);
3760 	}
3761 }
3762 
3763 void
3764 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3765 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3766     SCTP_UNUSED
3767 #endif
3768 )
3769 {
3770 
3771 	if (stcb == NULL) {
3772 		return;
3773 	}
3774 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3775 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3776 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3777 		return;
3778 	}
3779 	/* Tell them we lost the asoc */
3780 	sctp_report_all_outbound(stcb, 1, so_locked);
3781 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3782 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3783 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3784 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3785 	}
3786 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3787 }
3788 
/*
 * Abort an association in response to an inbound packet: notify the ULP
 * (if a TCB exists), send an ABORT to the peer, then free the TCB - or,
 * with no TCB, finish freeing a socket-gone inp if this was its last
 * association.  'm'/'iphlen'/'sh' describe the packet that triggered the
 * abort; 'op_err' is an optional error-cause chain consumed by
 * sctp_send_abort().
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* With no TCB, vtag stays 0 and the caller-supplied vrf_id is used. */
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: drop the TCB lock, take the socket
		 * lock, re-take the TCB lock; the refcount bump keeps the
		 * TCB alive while unlocked.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last association gone; finish inp teardown */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3834 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the per-association inbound and outbound TSN track
 * logs (circular buffers of SCTP_TSN_LOG_SIZE entries).  The entire body
 * is compiled out unless NOSIY_PRINTS is defined (NOTE(review): looks
 * like a typo of "NOISY_PRINTS", but the spelling is what gates the
 * code, so it is left as-is).
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* wrapped log: print the older entries (tsn_in_at..end) first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	/* then the newer entries (0..tsn_in_at-1) */
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* wrapped log: older entries first, then the newer ones */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3898 
/*
 * Abort an existing association: notify the ULP, send an ABORT chunk
 * to the peer, update statistics, and free the TCB.  When stcb is
 * NULL there is nothing to abort; instead the inp itself is torn down
 * if its socket is already gone and no associations remain on it.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* NOTE(review): vtag is captured but never read below in this build */
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* an established (or shutdown-received) association is going away */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * sctp_free_assoc needs the socket lock; acquire it with the
	 * hold-ref / drop-TCB-lock / relock dance to respect lock order.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3964 
3965 void
3966 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3967     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3968 {
3969 	struct sctp_chunkhdr *ch, chunk_buf;
3970 	unsigned int chk_length;
3971 
3972 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3973 	/* Generate a TO address for future reference */
3974 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3975 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3976 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3977 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3978 		}
3979 	}
3980 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3981 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3982 	while (ch != NULL) {
3983 		chk_length = ntohs(ch->chunk_length);
3984 		if (chk_length < sizeof(*ch)) {
3985 			/* break to abort land */
3986 			break;
3987 		}
3988 		switch (ch->chunk_type) {
3989 		case SCTP_COOKIE_ECHO:
3990 			/* We hit here only if the assoc is being freed */
3991 			return;
3992 		case SCTP_PACKET_DROPPED:
3993 			/* we don't respond to pkt-dropped */
3994 			return;
3995 		case SCTP_ABORT_ASSOCIATION:
3996 			/* we don't respond with an ABORT to an ABORT */
3997 			return;
3998 		case SCTP_SHUTDOWN_COMPLETE:
3999 			/*
4000 			 * we ignore it since we are not waiting for it and
4001 			 * peer is gone
4002 			 */
4003 			return;
4004 		case SCTP_SHUTDOWN_ACK:
4005 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
4006 			return;
4007 		default:
4008 			break;
4009 		}
4010 		offset += SCTP_SIZE32(chk_length);
4011 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4012 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4013 	}
4014 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
4015 }
4016 
4017 /*
4018  * check the inbound datagram to make sure there is not an abort inside it,
4019  * if there is return 1, else return 0.
4020  */
4021 int
4022 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4023 {
4024 	struct sctp_chunkhdr *ch;
4025 	struct sctp_init_chunk *init_chk, chunk_buf;
4026 	int offset;
4027 	unsigned int chk_length;
4028 
4029 	offset = iphlen + sizeof(struct sctphdr);
4030 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4031 	    (uint8_t *) & chunk_buf);
4032 	while (ch != NULL) {
4033 		chk_length = ntohs(ch->chunk_length);
4034 		if (chk_length < sizeof(*ch)) {
4035 			/* packet is probably corrupt */
4036 			break;
4037 		}
4038 		/* we seem to be ok, is it an abort? */
4039 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4040 			/* yep, tell them */
4041 			return (1);
4042 		}
4043 		if (ch->chunk_type == SCTP_INITIATION) {
4044 			/* need to update the Vtag */
4045 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4046 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4047 			if (init_chk != NULL) {
4048 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4049 			}
4050 		}
4051 		/* Nope, move to the next chunk */
4052 		offset += SCTP_SIZE32(chk_length);
4053 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4054 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4055 	}
4056 	return (0);
4057 }
4058 
4059 /*
4060  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4061  * set (i.e. it's 0) so, create this function to compare link local scopes
4062  */
4063 #ifdef INET6
4064 uint32_t
4065 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4066 {
4067 	struct sockaddr_in6 a, b;
4068 
4069 	/* save copies */
4070 	a = *addr1;
4071 	b = *addr2;
4072 
4073 	if (a.sin6_scope_id == 0)
4074 		if (sa6_recoverscope(&a)) {
4075 			/* can't get scope, so can't match */
4076 			return (0);
4077 		}
4078 	if (b.sin6_scope_id == 0)
4079 		if (sa6_recoverscope(&b)) {
4080 			/* can't get scope, so can't match */
4081 			return (0);
4082 		}
4083 	if (a.sin6_scope_id != b.sin6_scope_id)
4084 		return (0);
4085 
4086 	return (1);
4087 }
4088 
4089 /*
4090  * returns a sockaddr_in6 with embedded scope recovered and removed
4091  */
4092 struct sockaddr_in6 *
4093 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4094 {
4095 	/* check and strip embedded scope junk */
4096 	if (addr->sin6_family == AF_INET6) {
4097 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4098 			if (addr->sin6_scope_id == 0) {
4099 				*store = *addr;
4100 				if (!sa6_recoverscope(store)) {
4101 					/* use the recovered scope */
4102 					addr = store;
4103 				}
4104 			} else {
4105 				/* else, return the original "to" addr */
4106 				in6_clearscope(&addr->sin6_addr);
4107 			}
4108 		}
4109 	}
4110 	return (addr);
4111 }
4112 
4113 #endif
4114 
4115 /*
4116  * are the two addresses the same?  currently a "scopeless" check returns: 1
4117  * if same, 0 if not
4118  */
4119 int
4120 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4121 {
4122 
4123 	/* must be valid */
4124 	if (sa1 == NULL || sa2 == NULL)
4125 		return (0);
4126 
4127 	/* must be the same family */
4128 	if (sa1->sa_family != sa2->sa_family)
4129 		return (0);
4130 
4131 	switch (sa1->sa_family) {
4132 #ifdef INET6
4133 	case AF_INET6:
4134 		{
4135 			/* IPv6 addresses */
4136 			struct sockaddr_in6 *sin6_1, *sin6_2;
4137 
4138 			sin6_1 = (struct sockaddr_in6 *)sa1;
4139 			sin6_2 = (struct sockaddr_in6 *)sa2;
4140 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4141 			    sin6_2));
4142 		}
4143 #endif
4144 	case AF_INET:
4145 		{
4146 			/* IPv4 addresses */
4147 			struct sockaddr_in *sin_1, *sin_2;
4148 
4149 			sin_1 = (struct sockaddr_in *)sa1;
4150 			sin_2 = (struct sockaddr_in *)sa2;
4151 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4152 		}
4153 	default:
4154 		/* we don't do these... */
4155 		return (0);
4156 	}
4157 }
4158 
4159 void
4160 sctp_print_address(struct sockaddr *sa)
4161 {
4162 #ifdef INET6
4163 	char ip6buf[INET6_ADDRSTRLEN];
4164 
4165 	ip6buf[0] = 0;
4166 #endif
4167 
4168 	switch (sa->sa_family) {
4169 #ifdef INET6
4170 	case AF_INET6:
4171 		{
4172 			struct sockaddr_in6 *sin6;
4173 
4174 			sin6 = (struct sockaddr_in6 *)sa;
4175 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4176 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4177 			    ntohs(sin6->sin6_port),
4178 			    sin6->sin6_scope_id);
4179 			break;
4180 		}
4181 #endif
4182 	case AF_INET:
4183 		{
4184 			struct sockaddr_in *sin;
4185 			unsigned char *p;
4186 
4187 			sin = (struct sockaddr_in *)sa;
4188 			p = (unsigned char *)&sin->sin_addr;
4189 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4190 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4191 			break;
4192 		}
4193 	default:
4194 		SCTP_PRINTF("?\n");
4195 		break;
4196 	}
4197 }
4198 
4199 void
4200 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4201 {
4202 	switch (iph->ip_v) {
4203 		case IPVERSION:
4204 		{
4205 			struct sockaddr_in lsa, fsa;
4206 
4207 			bzero(&lsa, sizeof(lsa));
4208 			lsa.sin_len = sizeof(lsa);
4209 			lsa.sin_family = AF_INET;
4210 			lsa.sin_addr = iph->ip_src;
4211 			lsa.sin_port = sh->src_port;
4212 			bzero(&fsa, sizeof(fsa));
4213 			fsa.sin_len = sizeof(fsa);
4214 			fsa.sin_family = AF_INET;
4215 			fsa.sin_addr = iph->ip_dst;
4216 			fsa.sin_port = sh->dest_port;
4217 			SCTP_PRINTF("src: ");
4218 			sctp_print_address((struct sockaddr *)&lsa);
4219 			SCTP_PRINTF("dest: ");
4220 			sctp_print_address((struct sockaddr *)&fsa);
4221 			break;
4222 		}
4223 #ifdef INET6
4224 	case IPV6_VERSION >> 4:
4225 		{
4226 			struct ip6_hdr *ip6;
4227 			struct sockaddr_in6 lsa6, fsa6;
4228 
4229 			ip6 = (struct ip6_hdr *)iph;
4230 			bzero(&lsa6, sizeof(lsa6));
4231 			lsa6.sin6_len = sizeof(lsa6);
4232 			lsa6.sin6_family = AF_INET6;
4233 			lsa6.sin6_addr = ip6->ip6_src;
4234 			lsa6.sin6_port = sh->src_port;
4235 			bzero(&fsa6, sizeof(fsa6));
4236 			fsa6.sin6_len = sizeof(fsa6);
4237 			fsa6.sin6_family = AF_INET6;
4238 			fsa6.sin6_addr = ip6->ip6_dst;
4239 			fsa6.sin6_port = sh->dest_port;
4240 			SCTP_PRINTF("src: ");
4241 			sctp_print_address((struct sockaddr *)&lsa6);
4242 			SCTP_PRINTF("dest: ");
4243 			sctp_print_address((struct sockaddr *)&fsa6);
4244 			break;
4245 		}
4246 #endif
4247 	default:
4248 		/* TSNH */
4249 		break;
4250 	}
4251 }
4252 
/*
 * Move every read-queue entry (control) belonging to stcb from
 * old_inp's read queue to new_inp's, transferring the mbuf byte
 * accounting from the old socket's receive buffer to the new one.
 * Used when an association migrates to a fresh endpoint (peeloff /
 * accept).  Silently gives up if the old receive buffer cannot be
 * sblock'ed with the given waitflags.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* keep concurrent readers off the old receive buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for out target stcb */
	while (control) {
		/* capture the successor before control may be unlinked */
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* un-account each mbuf from the old socket buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* account each mbuf to the new socket buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4334 
4335 void
4336 sctp_add_to_readq(struct sctp_inpcb *inp,
4337     struct sctp_tcb *stcb,
4338     struct sctp_queued_to_read *control,
4339     struct sockbuf *sb,
4340     int end,
4341     int inp_read_lock_held,
4342     int so_locked
4343 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4344     SCTP_UNUSED
4345 #endif
4346 )
4347 {
4348 	/*
4349 	 * Here we must place the control on the end of the socket read
4350 	 * queue AND increment sb_cc so that select will work properly on
4351 	 * read.
4352 	 */
4353 	struct mbuf *m, *prev = NULL;
4354 
4355 	if (inp == NULL) {
4356 		/* Gak, TSNH!! */
4357 #ifdef INVARIANTS
4358 		panic("Gak, inp NULL on add_to_readq");
4359 #endif
4360 		return;
4361 	}
4362 	if (inp_read_lock_held == 0)
4363 		SCTP_INP_READ_LOCK(inp);
4364 	if (!(control->spec_flags & M_NOTIFICATION)) {
4365 		atomic_add_int(&inp->total_recvs, 1);
4366 		if (!control->do_not_ref_stcb) {
4367 			atomic_add_int(&stcb->total_recvs, 1);
4368 		}
4369 	}
4370 	m = control->data;
4371 	control->held_length = 0;
4372 	control->length = 0;
4373 	while (m) {
4374 		if (SCTP_BUF_LEN(m) == 0) {
4375 			/* Skip mbufs with NO length */
4376 			if (prev == NULL) {
4377 				/* First one */
4378 				control->data = sctp_m_free(m);
4379 				m = control->data;
4380 			} else {
4381 				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
4382 				m = SCTP_BUF_NEXT(prev);
4383 			}
4384 			if (m == NULL) {
4385 				control->tail_mbuf = prev;;
4386 			}
4387 			continue;
4388 		}
4389 		prev = m;
4390 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4391 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
4392 		}
4393 		sctp_sballoc(stcb, sb, m);
4394 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
4395 			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
4396 		}
4397 		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
4398 		m = SCTP_BUF_NEXT(m);
4399 	}
4400 	if (prev != NULL) {
4401 		control->tail_mbuf = prev;
4402 	} else {
4403 		/* Everything got collapsed out?? */
4404 		if (inp_read_lock_held == 0)
4405 			SCTP_INP_READ_UNLOCK(inp);
4406 		return;
4407 	}
4408 	if (end) {
4409 		control->end_added = 1;
4410 	}
4411 	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
4412 	if (inp_read_lock_held == 0)
4413 		SCTP_INP_READ_UNLOCK(inp);
4414 	if (inp && inp->sctp_socket) {
4415 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
4416 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
4417 		} else {
4418 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4419 			struct socket *so;
4420 
4421 			so = SCTP_INP_SO(inp);
4422 			if (!so_locked) {
4423 				atomic_add_int(&stcb->asoc.refcnt, 1);
4424 				SCTP_TCB_UNLOCK(stcb);
4425 				SCTP_SOCKET_LOCK(so, 1);
4426 				SCTP_TCB_LOCK(stcb);
4427 				atomic_subtract_int(&stcb->asoc.refcnt, 1);
4428 				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
4429 					SCTP_SOCKET_UNLOCK(so, 1);
4430 					return;
4431 				}
4432 			}
4433 #endif
4434 			sctp_sorwakeup(inp, inp->sctp_socket);
4435 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4436 			if (!so_locked) {
4437 				SCTP_SOCKET_UNLOCK(so, 1);
4438 			}
4439 #endif
4440 		}
4441 	}
4442 }
4443 
4444 
/*
 * Append mbuf chain m to an existing, still-incomplete read-queue
 * entry (partial delivery API or reassembly).  Zero-length mbufs are
 * pruned while walking the chain; the surviving bytes are added to
 * the control's length and, when sb is non-NULL, to the socket-buffer
 * accounting.  Returns 0 on success, -1 when the control is missing,
 * already complete, or m is empty.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			/* PDAPI case: charge the bytes to the socket buffer */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4586 
4587 
4588 
4589 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4590  *************ALTERNATE ROUTING CODE
4591  */
4592 
4593 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4594  *************ALTERNATE ROUTING CODE
4595  */
4596 
4597 struct mbuf *
4598 sctp_generate_invmanparam(int err)
4599 {
4600 	/* Return a MBUF with a invalid mandatory parameter */
4601 	struct mbuf *m;
4602 
4603 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4604 	if (m) {
4605 		struct sctp_paramhdr *ph;
4606 
4607 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4608 		ph = mtod(m, struct sctp_paramhdr *);
4609 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4610 		ph->param_type = htons(err);
4611 	}
4612 	return (m);
4613 }
4614 
4615 #ifdef SCTP_MBCNT_LOGGING
4616 void
4617 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4618     struct sctp_tmit_chunk *tp1, int chk_cnt)
4619 {
4620 	if (tp1->data == NULL) {
4621 		return;
4622 	}
4623 	asoc->chunks_on_out_queue -= chk_cnt;
4624 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4625 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4626 		    asoc->total_output_queue_size,
4627 		    tp1->book_size,
4628 		    0,
4629 		    tp1->mbcnt);
4630 	}
4631 	if (asoc->total_output_queue_size >= tp1->book_size) {
4632 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4633 	} else {
4634 		asoc->total_output_queue_size = 0;
4635 	}
4636 
4637 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4638 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4639 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4640 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4641 		} else {
4642 			stcb->sctp_socket->so_snd.sb_cc = 0;
4643 
4644 		}
4645 	}
4646 }
4647 
4648 #endif
4649 
4650 int
4651 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4652     int reason, int so_locked
4653 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4654     SCTP_UNUSED
4655 #endif
4656 )
4657 {
4658 	struct sctp_stream_out *strq;
4659 	struct sctp_tmit_chunk *chk = NULL;
4660 	struct sctp_stream_queue_pending *sp;
4661 	uint16_t stream = 0, seq = 0;
4662 	uint8_t foundeom = 0;
4663 	int ret_sz = 0;
4664 	int notdone;
4665 	int do_wakeup_routine = 0;
4666 
4667 	stream = tp1->rec.data.stream_number;
4668 	seq = tp1->rec.data.stream_seq;
4669 	do {
4670 		ret_sz += tp1->book_size;
4671 		if (tp1->data != NULL) {
4672 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4673 				sctp_flight_size_decrease(tp1);
4674 				sctp_total_flight_decrease(stcb, tp1);
4675 			}
4676 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4677 			stcb->asoc.peers_rwnd += tp1->send_size;
4678 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4679 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4680 			if (tp1->data) {
4681 				sctp_m_freem(tp1->data);
4682 				tp1->data = NULL;
4683 			}
4684 			do_wakeup_routine = 1;
4685 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4686 				stcb->asoc.sent_queue_cnt_removeable--;
4687 			}
4688 		}
4689 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4690 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4691 		    SCTP_DATA_NOT_FRAG) {
4692 			/* not frag'ed we ae done   */
4693 			notdone = 0;
4694 			foundeom = 1;
4695 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4696 			/* end of frag, we are done */
4697 			notdone = 0;
4698 			foundeom = 1;
4699 		} else {
4700 			/*
4701 			 * Its a begin or middle piece, we must mark all of
4702 			 * it
4703 			 */
4704 			notdone = 1;
4705 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4706 		}
4707 	} while (tp1 && notdone);
4708 	if (foundeom == 0) {
4709 		/*
4710 		 * The multi-part message was scattered across the send and
4711 		 * sent queue.
4712 		 */
4713 next_on_sent:
4714 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4715 		/*
4716 		 * recurse throught the send_queue too, starting at the
4717 		 * beginning.
4718 		 */
4719 		if ((tp1) &&
4720 		    (tp1->rec.data.stream_number == stream) &&
4721 		    (tp1->rec.data.stream_seq == seq)
4722 		    ) {
4723 			/*
4724 			 * save to chk in case we have some on stream out
4725 			 * queue. If so and we have an un-transmitted one we
4726 			 * don't have to fudge the TSN.
4727 			 */
4728 			chk = tp1;
4729 			ret_sz += tp1->book_size;
4730 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4731 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4732 			if (tp1->data) {
4733 				sctp_m_freem(tp1->data);
4734 				tp1->data = NULL;
4735 			}
4736 			/* No flight involved here book the size to 0 */
4737 			tp1->book_size = 0;
4738 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4739 				foundeom = 1;
4740 			}
4741 			do_wakeup_routine = 1;
4742 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4743 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4744 			/*
4745 			 * on to the sent queue so we can wait for it to be
4746 			 * passed by.
4747 			 */
4748 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4749 			    sctp_next);
4750 			stcb->asoc.send_queue_cnt--;
4751 			stcb->asoc.sent_queue_cnt++;
4752 			goto next_on_sent;
4753 		}
4754 	}
4755 	if (foundeom == 0) {
4756 		/*
4757 		 * Still no eom found. That means there is stuff left on the
4758 		 * stream out queue.. yuck.
4759 		 */
4760 		strq = &stcb->asoc.strmout[stream];
4761 		SCTP_TCB_SEND_LOCK(stcb);
4762 		sp = TAILQ_FIRST(&strq->outqueue);
4763 		while (sp->strseq <= seq) {
4764 			/* Check if its our SEQ */
4765 			if (sp->strseq == seq) {
4766 				sp->discard_rest = 1;
4767 				/*
4768 				 * We may need to put a chunk on the queue
4769 				 * that holds the TSN that would have been
4770 				 * sent with the LAST bit.
4771 				 */
4772 				if (chk == NULL) {
4773 					/* Yep, we have to */
4774 					sctp_alloc_a_chunk(stcb, chk);
4775 					if (chk == NULL) {
4776 						/*
4777 						 * we are hosed. All we can
4778 						 * do is nothing.. which
4779 						 * will cause an abort if
4780 						 * the peer is paying
4781 						 * attention.
4782 						 */
4783 						goto oh_well;
4784 					}
4785 					memset(chk, 0, sizeof(*chk));
4786 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4787 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4788 					chk->asoc = &stcb->asoc;
4789 					chk->rec.data.stream_seq = sp->strseq;
4790 					chk->rec.data.stream_number = sp->stream;
4791 					chk->rec.data.payloadtype = sp->ppid;
4792 					chk->rec.data.context = sp->context;
4793 					chk->flags = sp->act_flags;
4794 					chk->whoTo = sp->net;
4795 					atomic_add_int(&chk->whoTo->ref_count, 1);
4796 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4797 					stcb->asoc.pr_sctp_cnt++;
4798 					chk->pr_sctp_on = 1;
4799 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4800 					stcb->asoc.sent_queue_cnt++;
4801 					stcb->asoc.pr_sctp_cnt++;
4802 				} else {
4803 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4804 				}
4805 		oh_well:
4806 				if (sp->data) {
4807 					/*
4808 					 * Pull any data to free up the SB
4809 					 * and allow sender to "add more"
4810 					 * whilc we will throw away :-)
4811 					 */
4812 					sctp_free_spbufspace(stcb, &stcb->asoc,
4813 					    sp);
4814 					ret_sz += sp->length;
4815 					do_wakeup_routine = 1;
4816 					sp->some_taken = 1;
4817 					sctp_m_freem(sp->data);
4818 					sp->length = 0;
4819 					sp->data = NULL;
4820 					sp->tail_mbuf = NULL;
4821 				}
4822 				break;
4823 			} else {
4824 				/* Next one please */
4825 				sp = TAILQ_NEXT(sp, next);
4826 			}
4827 		}		/* End while */
4828 		SCTP_TCB_SEND_UNLOCK(stcb);
4829 	}
4830 	if (do_wakeup_routine) {
4831 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4832 		struct socket *so;
4833 
4834 		so = SCTP_INP_SO(stcb->sctp_ep);
4835 		if (!so_locked) {
4836 			atomic_add_int(&stcb->asoc.refcnt, 1);
4837 			SCTP_TCB_UNLOCK(stcb);
4838 			SCTP_SOCKET_LOCK(so, 1);
4839 			SCTP_TCB_LOCK(stcb);
4840 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4841 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4842 				/* assoc was freed while we were unlocked */
4843 				SCTP_SOCKET_UNLOCK(so, 1);
4844 				return (ret_sz);
4845 			}
4846 		}
4847 #endif
4848 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4849 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4850 		if (!so_locked) {
4851 			SCTP_SOCKET_UNLOCK(so, 1);
4852 		}
4853 #endif
4854 	}
4855 	return (ret_sz);
4856 }
4857 
4858 /*
4859  * checks to see if the given address, sa, is one that is currently known by
4860  * the kernel note: can't distinguish the same address on multiple interfaces
4861  * and doesn't handle multiple addresses with different zone/scope id's note:
4862  * ifa_ifwithaddr() compares the entire sockaddr struct
4863  */
4864 struct sctp_ifa *
4865 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4866     int holds_lock)
4867 {
4868 	struct sctp_laddr *laddr;
4869 
4870 	if (holds_lock == 0) {
4871 		SCTP_INP_RLOCK(inp);
4872 	}
4873 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4874 		if (laddr->ifa == NULL)
4875 			continue;
4876 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4877 			continue;
4878 		if (addr->sa_family == AF_INET) {
4879 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4880 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4881 				/* found him. */
4882 				if (holds_lock == 0) {
4883 					SCTP_INP_RUNLOCK(inp);
4884 				}
4885 				return (laddr->ifa);
4886 				break;
4887 			}
4888 		}
4889 #ifdef INET6
4890 		if (addr->sa_family == AF_INET6) {
4891 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4892 			    &laddr->ifa->address.sin6)) {
4893 				/* found him. */
4894 				if (holds_lock == 0) {
4895 					SCTP_INP_RUNLOCK(inp);
4896 				}
4897 				return (laddr->ifa);
4898 				break;
4899 			}
4900 		}
4901 #endif
4902 	}
4903 	if (holds_lock == 0) {
4904 		SCTP_INP_RUNLOCK(inp);
4905 	}
4906 	return (NULL);
4907 }
4908 
/*
 * Compute the address-hash value used to pick a bucket in a VRF's
 * address hash table.  For IPv4 the 32-bit address is folded onto its
 * own upper half; for IPv6 the four 32-bit words of the address are
 * summed and folded the same way.  Unknown families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			return (sin->sin_addr.s_addr ^
			    (sin->sin_addr.s_addr >> 16));
		}
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t words[4];
			uint32_t folded;

			sin6 = (struct sockaddr_in6 *)addr;
			/*
			 * Read the 128-bit address as four native 32-bit
			 * words; same bytes the s6_addr32 view exposes.
			 */
			memcpy(words, &sin6->sin6_addr, sizeof(words));
			folded = words[0] + words[1] + words[2] + words[3];
			folded ^= (folded >> 16);
			return (folded);
		}
	default:
		return (0);
	}
}
4931 
4932 struct sctp_ifa *
4933 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4934 {
4935 	struct sctp_ifa *sctp_ifap;
4936 	struct sctp_vrf *vrf;
4937 	struct sctp_ifalist *hash_head;
4938 	uint32_t hash_of_addr;
4939 
4940 	if (holds_lock == 0)
4941 		SCTP_IPI_ADDR_RLOCK();
4942 
4943 	vrf = sctp_find_vrf(vrf_id);
4944 	if (vrf == NULL) {
4945 stage_right:
4946 		if (holds_lock == 0)
4947 			SCTP_IPI_ADDR_RUNLOCK();
4948 		return (NULL);
4949 	}
4950 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4951 
4952 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4953 	if (hash_head == NULL) {
4954 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4955 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4956 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4957 		sctp_print_address(addr);
4958 		SCTP_PRINTF("No such bucket for address\n");
4959 		if (holds_lock == 0)
4960 			SCTP_IPI_ADDR_RUNLOCK();
4961 
4962 		return (NULL);
4963 	}
4964 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4965 		if (sctp_ifap == NULL) {
4966 #ifdef INVARIANTS
4967 			panic("Huh LIST_FOREACH corrupt");
4968 			goto stage_right;
4969 #else
4970 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4971 			goto stage_right;
4972 #endif
4973 		}
4974 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4975 			continue;
4976 		if (addr->sa_family == AF_INET) {
4977 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4978 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4979 				/* found him. */
4980 				if (holds_lock == 0)
4981 					SCTP_IPI_ADDR_RUNLOCK();
4982 				return (sctp_ifap);
4983 				break;
4984 			}
4985 		}
4986 #ifdef INET6
4987 		if (addr->sa_family == AF_INET6) {
4988 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4989 			    &sctp_ifap->address.sin6)) {
4990 				/* found him. */
4991 				if (holds_lock == 0)
4992 					SCTP_IPI_ADDR_RUNLOCK();
4993 				return (sctp_ifap);
4994 				break;
4995 			}
4996 		}
4997 #endif
4998 	}
4999 	if (holds_lock == 0)
5000 		SCTP_IPI_ADDR_RUNLOCK();
5001 	return (NULL);
5002 }
5003 
/*
 * Called as the application drains data from the socket: decide whether
 * enough receive-buffer space has been freed (relative to rwnd_req) to
 * justify sending a window-update SACK to the peer, and if so send one
 * and kick chunk output.  *freed_so_far is the byte count freed since
 * the last call; it is consumed (zeroed) here.  hold_rlock indicates
 * the caller holds the INP read lock; it is dropped around the SACK
 * send and re-acquired before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed while we work. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check: association is going away, no update needed. */
		goto no_lock;
	}
	/* Also pin the endpoint before dereferencing its socket. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Accumulate what the caller freed since the last report. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Caller's running count is consumed here. */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be if we reported now. */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Window has opened enough to tell the peer.  Drop the
		 * INP read lock first to respect lock ordering before
		 * taking the TCB lock.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* Re-check under the TCB lock: no reports here. */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		/*
		 * EY if nr_sacks used then send an nr-sack , a sack
		 * otherwise
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
			sctp_send_nr_sack(stcb);
		else
			sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Not enough yet: remember how much is pending. */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	if (so && r_unlocked && hold_rlock) {
		/* Restore the INP read lock we dropped for the caller. */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Release the association reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5092 
5093 int
5094 sctp_sorecvmsg(struct socket *so,
5095     struct uio *uio,
5096     struct mbuf **mp,
5097     struct sockaddr *from,
5098     int fromlen,
5099     int *msg_flags,
5100     struct sctp_sndrcvinfo *sinfo,
5101     int filling_sinfo)
5102 {
5103 	/*
5104 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5105 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5106 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5107 	 * On the way out we may send out any combination of:
5108 	 * MSG_NOTIFICATION MSG_EOR
5109 	 *
5110 	 */
5111 	struct sctp_inpcb *inp = NULL;
5112 	int my_len = 0;
5113 	int cp_len = 0, error = 0;
5114 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5115 	struct mbuf *m = NULL, *embuf = NULL;
5116 	struct sctp_tcb *stcb = NULL;
5117 	int wakeup_read_socket = 0;
5118 	int freecnt_applied = 0;
5119 	int out_flags = 0, in_flags = 0;
5120 	int block_allowed = 1;
5121 	uint32_t freed_so_far = 0;
5122 	uint32_t copied_so_far = 0;
5123 	int in_eeor_mode = 0;
5124 	int no_rcv_needed = 0;
5125 	uint32_t rwnd_req = 0;
5126 	int hold_sblock = 0;
5127 	int hold_rlock = 0;
5128 	int slen = 0;
5129 	uint32_t held_length = 0;
5130 	int sockbuf_lock = 0;
5131 
5132 	if (uio == NULL) {
5133 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5134 		return (EINVAL);
5135 	}
5136 	if (msg_flags) {
5137 		in_flags = *msg_flags;
5138 		if (in_flags & MSG_PEEK)
5139 			SCTP_STAT_INCR(sctps_read_peeks);
5140 	} else {
5141 		in_flags = 0;
5142 	}
5143 	slen = uio->uio_resid;
5144 
5145 	/* Pull in and set up our int flags */
5146 	if (in_flags & MSG_OOB) {
5147 		/* Out of band's NOT supported */
5148 		return (EOPNOTSUPP);
5149 	}
5150 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5151 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5152 		return (EINVAL);
5153 	}
5154 	if ((in_flags & (MSG_DONTWAIT
5155 	    | MSG_NBIO
5156 	    )) ||
5157 	    SCTP_SO_IS_NBIO(so)) {
5158 		block_allowed = 0;
5159 	}
5160 	/* setup the endpoint */
5161 	inp = (struct sctp_inpcb *)so->so_pcb;
5162 	if (inp == NULL) {
5163 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5164 		return (EFAULT);
5165 	}
5166 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5167 	/* Must be at least a MTU's worth */
5168 	if (rwnd_req < SCTP_MIN_RWND)
5169 		rwnd_req = SCTP_MIN_RWND;
5170 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5171 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5172 		sctp_misc_ints(SCTP_SORECV_ENTER,
5173 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5174 	}
5175 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5176 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5177 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5178 	}
5179 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5180 	sockbuf_lock = 1;
5181 	if (error) {
5182 		goto release_unlocked;
5183 	}
5184 restart:
5185 
5186 
5187 restart_nosblocks:
5188 	if (hold_sblock == 0) {
5189 		SOCKBUF_LOCK(&so->so_rcv);
5190 		hold_sblock = 1;
5191 	}
5192 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5193 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5194 		goto out;
5195 	}
5196 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5197 		if (so->so_error) {
5198 			error = so->so_error;
5199 			if ((in_flags & MSG_PEEK) == 0)
5200 				so->so_error = 0;
5201 			goto out;
5202 		} else {
5203 			if (so->so_rcv.sb_cc == 0) {
5204 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5205 				/* indicate EOF */
5206 				error = 0;
5207 				goto out;
5208 			}
5209 		}
5210 	}
5211 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5212 		/* we need to wait for data */
5213 		if ((so->so_rcv.sb_cc == 0) &&
5214 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5215 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5216 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5217 				/*
5218 				 * For active open side clear flags for
5219 				 * re-use passive open is blocked by
5220 				 * connect.
5221 				 */
5222 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5223 					/*
5224 					 * You were aborted, passive side
5225 					 * always hits here
5226 					 */
5227 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5228 					error = ECONNRESET;
5229 					/*
5230 					 * You get this once if you are
5231 					 * active open side
5232 					 */
5233 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5234 						/*
5235 						 * Remove flag if on the
5236 						 * active open side
5237 						 */
5238 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5239 					}
5240 				}
5241 				so->so_state &= ~(SS_ISCONNECTING |
5242 				    SS_ISDISCONNECTING |
5243 				    SS_ISCONFIRMING |
5244 				    SS_ISCONNECTED);
5245 				if (error == 0) {
5246 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5247 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5248 						error = ENOTCONN;
5249 					} else {
5250 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5251 					}
5252 				}
5253 				goto out;
5254 			}
5255 		}
5256 		error = sbwait(&so->so_rcv);
5257 		if (error) {
5258 			goto out;
5259 		}
5260 		held_length = 0;
5261 		goto restart_nosblocks;
5262 	} else if (so->so_rcv.sb_cc == 0) {
5263 		if (so->so_error) {
5264 			error = so->so_error;
5265 			if ((in_flags & MSG_PEEK) == 0)
5266 				so->so_error = 0;
5267 		} else {
5268 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5269 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5270 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5271 					/*
5272 					 * For active open side clear flags
5273 					 * for re-use passive open is
5274 					 * blocked by connect.
5275 					 */
5276 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5277 						/*
5278 						 * You were aborted, passive
5279 						 * side always hits here
5280 						 */
5281 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5282 						error = ECONNRESET;
5283 						/*
5284 						 * You get this once if you
5285 						 * are active open side
5286 						 */
5287 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5288 							/*
5289 							 * Remove flag if on
5290 							 * the active open
5291 							 * side
5292 							 */
5293 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5294 						}
5295 					}
5296 					so->so_state &= ~(SS_ISCONNECTING |
5297 					    SS_ISDISCONNECTING |
5298 					    SS_ISCONFIRMING |
5299 					    SS_ISCONNECTED);
5300 					if (error == 0) {
5301 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5302 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5303 							error = ENOTCONN;
5304 						} else {
5305 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5306 						}
5307 					}
5308 					goto out;
5309 				}
5310 			}
5311 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5312 			error = EWOULDBLOCK;
5313 		}
5314 		goto out;
5315 	}
5316 	if (hold_sblock == 1) {
5317 		SOCKBUF_UNLOCK(&so->so_rcv);
5318 		hold_sblock = 0;
5319 	}
5320 	/* we possibly have data we can read */
5321 	/* sa_ignore FREED_MEMORY */
5322 	control = TAILQ_FIRST(&inp->read_queue);
5323 	if (control == NULL) {
5324 		/*
5325 		 * This could be happening since the appender did the
		 * increment but has not yet done the tailq insert onto the
5327 		 * read_queue
5328 		 */
5329 		if (hold_rlock == 0) {
5330 			SCTP_INP_READ_LOCK(inp);
5331 			hold_rlock = 1;
5332 		}
5333 		control = TAILQ_FIRST(&inp->read_queue);
5334 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5335 #ifdef INVARIANTS
5336 			panic("Huh, its non zero and nothing on control?");
5337 #endif
5338 			so->so_rcv.sb_cc = 0;
5339 		}
5340 		SCTP_INP_READ_UNLOCK(inp);
5341 		hold_rlock = 0;
5342 		goto restart;
5343 	}
5344 	if ((control->length == 0) &&
5345 	    (control->do_not_ref_stcb)) {
5346 		/*
5347 		 * Clean up code for freeing assoc that left behind a
5348 		 * pdapi.. maybe a peer in EEOR that just closed after
5349 		 * sending and never indicated a EOR.
5350 		 */
5351 		if (hold_rlock == 0) {
5352 			hold_rlock = 1;
5353 			SCTP_INP_READ_LOCK(inp);
5354 		}
5355 		control->held_length = 0;
5356 		if (control->data) {
5357 			/* Hmm there is data here .. fix */
5358 			struct mbuf *m_tmp;
5359 			int cnt = 0;
5360 
5361 			m_tmp = control->data;
5362 			while (m_tmp) {
5363 				cnt += SCTP_BUF_LEN(m_tmp);
5364 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5365 					control->tail_mbuf = m_tmp;
5366 					control->end_added = 1;
5367 				}
5368 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5369 			}
5370 			control->length = cnt;
5371 		} else {
5372 			/* remove it */
5373 			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
5375 			sctp_free_remote_addr(control->whoFrom);
5376 			sctp_free_a_readq(stcb, control);
5377 		}
5378 		if (hold_rlock) {
5379 			hold_rlock = 0;
5380 			SCTP_INP_READ_UNLOCK(inp);
5381 		}
5382 		goto restart;
5383 	}
5384 	if ((control->length == 0) &&
5385 	    (control->end_added == 1)) {
5386 		/*
5387 		 * Do we also need to check for (control->pdapi_aborted ==
5388 		 * 1)?
5389 		 */
5390 		if (hold_rlock == 0) {
5391 			hold_rlock = 1;
5392 			SCTP_INP_READ_LOCK(inp);
5393 		}
5394 		TAILQ_REMOVE(&inp->read_queue, control, next);
5395 		if (control->data) {
5396 #ifdef INVARIANTS
5397 			panic("control->data not null but control->length == 0");
5398 #else
5399 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5400 			sctp_m_freem(control->data);
5401 			control->data = NULL;
5402 #endif
5403 		}
5404 		if (control->aux_data) {
5405 			sctp_m_free(control->aux_data);
5406 			control->aux_data = NULL;
5407 		}
5408 		sctp_free_remote_addr(control->whoFrom);
5409 		sctp_free_a_readq(stcb, control);
5410 		if (hold_rlock) {
5411 			hold_rlock = 0;
5412 			SCTP_INP_READ_UNLOCK(inp);
5413 		}
5414 		goto restart;
5415 	}
5416 	if (control->length == 0) {
5417 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5418 		    (filling_sinfo)) {
5419 			/* find a more suitable one then this */
5420 			ctl = TAILQ_NEXT(control, next);
5421 			while (ctl) {
5422 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5423 				    (ctl->some_taken ||
5424 				    (ctl->spec_flags & M_NOTIFICATION) ||
5425 				    ((ctl->do_not_ref_stcb == 0) &&
5426 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5427 				    ) {
5428 					/*-
5429 					 * If we have a different TCB next, and there is data
5430 					 * present. If we have already taken some (pdapi), OR we can
5431 					 * ref the tcb and no delivery as started on this stream, we
5432 					 * take it. Note we allow a notification on a different
5433 					 * assoc to be delivered..
5434 					 */
5435 					control = ctl;
5436 					goto found_one;
5437 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5438 					    (ctl->length) &&
5439 					    ((ctl->some_taken) ||
5440 					    ((ctl->do_not_ref_stcb == 0) &&
5441 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5442 					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5443 				    ) {
5444 					/*-
5445 					 * If we have the same tcb, and there is data present, and we
5446 					 * have the strm interleave feature present. Then if we have
5447 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5448 					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notification on the same assoc to
5450 					 * be delivered.
5451 					 */
5452 					control = ctl;
5453 					goto found_one;
5454 				}
5455 				ctl = TAILQ_NEXT(ctl, next);
5456 			}
5457 		}
5458 		/*
5459 		 * if we reach here, not suitable replacement is available
5460 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5461 		 * into the our held count, and its time to sleep again.
5462 		 */
5463 		held_length = so->so_rcv.sb_cc;
5464 		control->held_length = so->so_rcv.sb_cc;
5465 		goto restart;
5466 	}
5467 	/* Clear the held length since there is something to read */
5468 	control->held_length = 0;
5469 	if (hold_rlock) {
5470 		SCTP_INP_READ_UNLOCK(inp);
5471 		hold_rlock = 0;
5472 	}
5473 found_one:
5474 	/*
5475 	 * If we reach here, control has a some data for us to read off.
5476 	 * Note that stcb COULD be NULL.
5477 	 */
5478 	control->some_taken++;
5479 	if (hold_sblock) {
5480 		SOCKBUF_UNLOCK(&so->so_rcv);
5481 		hold_sblock = 0;
5482 	}
5483 	stcb = control->stcb;
5484 	if (stcb) {
5485 		if ((control->do_not_ref_stcb == 0) &&
5486 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5487 			if (freecnt_applied == 0)
5488 				stcb = NULL;
5489 		} else if (control->do_not_ref_stcb == 0) {
5490 			/* you can't free it on me please */
5491 			/*
5492 			 * The lock on the socket buffer protects us so the
5493 			 * free code will stop. But since we used the
5494 			 * socketbuf lock and the sender uses the tcb_lock
5495 			 * to increment, we need to use the atomic add to
5496 			 * the refcnt
5497 			 */
5498 			if (freecnt_applied) {
5499 #ifdef INVARIANTS
5500 				panic("refcnt already incremented");
5501 #else
5502 				printf("refcnt already incremented?\n");
5503 #endif
5504 			} else {
5505 				atomic_add_int(&stcb->asoc.refcnt, 1);
5506 				freecnt_applied = 1;
5507 			}
5508 			/*
5509 			 * Setup to remember how much we have not yet told
5510 			 * the peer our rwnd has opened up. Note we grab the
5511 			 * value from the tcb from last time. Note too that
5512 			 * sack sending clears this when a sack is sent,
5513 			 * which is fine. Once we hit the rwnd_req, we then
5514 			 * will go to the sctp_user_rcvd() that will not
5515 			 * lock until it KNOWs it MUST send a WUP-SACK.
5516 			 */
5517 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5518 			stcb->freed_by_sorcv_sincelast = 0;
5519 		}
5520 	}
5521 	if (stcb &&
5522 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5523 	    control->do_not_ref_stcb == 0) {
5524 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5525 	}
5526 	/* First lets get off the sinfo and sockaddr info */
5527 	if ((sinfo) && filling_sinfo) {
5528 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5529 		nxt = TAILQ_NEXT(control, next);
5530 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5531 			struct sctp_extrcvinfo *s_extra;
5532 
5533 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5534 			if ((nxt) &&
5535 			    (nxt->length)) {
5536 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5537 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5538 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5539 				}
5540 				if (nxt->spec_flags & M_NOTIFICATION) {
5541 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5542 				}
5543 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5544 				s_extra->sreinfo_next_length = nxt->length;
5545 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5546 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5547 				if (nxt->tail_mbuf != NULL) {
5548 					if (nxt->end_added) {
5549 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5550 					}
5551 				}
5552 			} else {
5553 				/*
5554 				 * we explicitly 0 this, since the memcpy
5555 				 * got some other things beyond the older
5556 				 * sinfo_ that is on the control's structure
5557 				 * :-D
5558 				 */
5559 				nxt = NULL;
5560 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5561 				s_extra->sreinfo_next_aid = 0;
5562 				s_extra->sreinfo_next_length = 0;
5563 				s_extra->sreinfo_next_ppid = 0;
5564 				s_extra->sreinfo_next_stream = 0;
5565 			}
5566 		}
5567 		/*
5568 		 * update off the real current cum-ack, if we have an stcb.
5569 		 */
5570 		if ((control->do_not_ref_stcb == 0) && stcb)
5571 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5572 		/*
5573 		 * mask off the high bits, we keep the actual chunk bits in
5574 		 * there.
5575 		 */
5576 		sinfo->sinfo_flags &= 0x00ff;
5577 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5578 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5579 		}
5580 	}
5581 #ifdef SCTP_ASOCLOG_OF_TSNS
5582 	{
5583 		int index, newindex;
5584 		struct sctp_pcbtsn_rlog *entry;
5585 
5586 		do {
5587 			index = inp->readlog_index;
5588 			newindex = index + 1;
5589 			if (newindex >= SCTP_READ_LOG_SIZE) {
5590 				newindex = 0;
5591 			}
5592 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5593 		entry = &inp->readlog[index];
5594 		entry->vtag = control->sinfo_assoc_id;
5595 		entry->strm = control->sinfo_stream;
5596 		entry->seq = control->sinfo_ssn;
5597 		entry->sz = control->length;
5598 		entry->flgs = control->sinfo_flags;
5599 	}
5600 #endif
5601 	if (fromlen && from) {
5602 		struct sockaddr *to;
5603 
5604 #ifdef INET
5605 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5606 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5607 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5608 #else
5609 		/* No AF_INET use AF_INET6 */
5610 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5611 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5612 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5613 #endif
5614 
5615 		to = from;
5616 #if defined(INET) && defined(INET6)
5617 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5618 		    (to->sa_family == AF_INET) &&
5619 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5620 			struct sockaddr_in *sin;
5621 			struct sockaddr_in6 sin6;
5622 
5623 			sin = (struct sockaddr_in *)to;
5624 			bzero(&sin6, sizeof(sin6));
5625 			sin6.sin6_family = AF_INET6;
5626 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5627 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5628 			bcopy(&sin->sin_addr,
5629 			    &sin6.sin6_addr.s6_addr32[3],
5630 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5631 			sin6.sin6_port = sin->sin_port;
5632 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5633 		}
5634 #endif
5635 #if defined(INET6)
5636 		{
5637 			struct sockaddr_in6 lsa6, *to6;
5638 
5639 			to6 = (struct sockaddr_in6 *)to;
5640 			sctp_recover_scope_mac(to6, (&lsa6));
5641 		}
5642 #endif
5643 	}
5644 	/* now copy out what data we can */
5645 	if (mp == NULL) {
5646 		/* copy out each mbuf in the chain up to length */
5647 get_more_data:
5648 		m = control->data;
5649 		while (m) {
5650 			/* Move out all we can */
5651 			cp_len = (int)uio->uio_resid;
5652 			my_len = (int)SCTP_BUF_LEN(m);
5653 			if (cp_len > my_len) {
5654 				/* not enough in this buf */
5655 				cp_len = my_len;
5656 			}
5657 			if (hold_rlock) {
5658 				SCTP_INP_READ_UNLOCK(inp);
5659 				hold_rlock = 0;
5660 			}
5661 			if (cp_len > 0)
5662 				error = uiomove(mtod(m, char *), cp_len, uio);
5663 			/* re-read */
5664 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5665 				goto release;
5666 			}
5667 			if ((control->do_not_ref_stcb == 0) && stcb &&
5668 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5669 				no_rcv_needed = 1;
5670 			}
5671 			if (error) {
5672 				/* error we are out of here */
5673 				goto release;
5674 			}
5675 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5676 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5677 			    ((control->end_added == 0) ||
5678 			    (control->end_added &&
5679 			    (TAILQ_NEXT(control, next) == NULL)))
5680 			    ) {
5681 				SCTP_INP_READ_LOCK(inp);
5682 				hold_rlock = 1;
5683 			}
5684 			if (cp_len == SCTP_BUF_LEN(m)) {
5685 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5686 				    (control->end_added)) {
5687 					out_flags |= MSG_EOR;
5688 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5689 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5690 				}
5691 				if (control->spec_flags & M_NOTIFICATION) {
5692 					out_flags |= MSG_NOTIFICATION;
5693 				}
5694 				/* we ate up the mbuf */
5695 				if (in_flags & MSG_PEEK) {
5696 					/* just looking */
5697 					m = SCTP_BUF_NEXT(m);
5698 					copied_so_far += cp_len;
5699 				} else {
5700 					/* dispose of the mbuf */
5701 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5702 						sctp_sblog(&so->so_rcv,
5703 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5704 					}
5705 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5706 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5707 						sctp_sblog(&so->so_rcv,
5708 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5709 					}
5710 					embuf = m;
5711 					copied_so_far += cp_len;
5712 					freed_so_far += cp_len;
5713 					freed_so_far += MSIZE;
5714 					atomic_subtract_int(&control->length, cp_len);
5715 					control->data = sctp_m_free(m);
5716 					m = control->data;
5717 					/*
5718 					 * been through it all, must hold sb
5719 					 * lock ok to null tail
5720 					 */
5721 					if (control->data == NULL) {
5722 #ifdef INVARIANTS
5723 						if ((control->end_added == 0) ||
5724 						    (TAILQ_NEXT(control, next) == NULL)) {
5725 							/*
5726 							 * If the end is not
5727 							 * added, OR the
5728 							 * next is NOT null
5729 							 * we MUST have the
5730 							 * lock.
5731 							 */
5732 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5733 								panic("Hmm we don't own the lock?");
5734 							}
5735 						}
5736 #endif
5737 						control->tail_mbuf = NULL;
5738 #ifdef INVARIANTS
5739 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5740 							panic("end_added, nothing left and no MSG_EOR");
5741 						}
5742 #endif
5743 					}
5744 				}
5745 			} else {
5746 				/* Do we need to trim the mbuf? */
5747 				if (control->spec_flags & M_NOTIFICATION) {
5748 					out_flags |= MSG_NOTIFICATION;
5749 				}
5750 				if ((in_flags & MSG_PEEK) == 0) {
5751 					SCTP_BUF_RESV_UF(m, cp_len);
5752 					SCTP_BUF_LEN(m) -= cp_len;
5753 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5754 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5755 					}
5756 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5757 					if ((control->do_not_ref_stcb == 0) &&
5758 					    stcb) {
5759 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5760 					}
5761 					copied_so_far += cp_len;
5762 					embuf = m;
5763 					freed_so_far += cp_len;
5764 					freed_so_far += MSIZE;
5765 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5766 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5767 						    SCTP_LOG_SBRESULT, 0);
5768 					}
5769 					atomic_subtract_int(&control->length, cp_len);
5770 				} else {
5771 					copied_so_far += cp_len;
5772 				}
5773 			}
5774 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5775 				break;
5776 			}
5777 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5778 			    (control->do_not_ref_stcb == 0) &&
5779 			    (freed_so_far >= rwnd_req)) {
5780 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5781 			}
5782 		}		/* end while(m) */
5783 		/*
5784 		 * At this point we have looked at it all and we either have
5785 		 * a MSG_EOR/or read all the user wants... <OR>
5786 		 * control->length == 0.
5787 		 */
5788 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5789 			/* we are done with this control */
5790 			if (control->length == 0) {
5791 				if (control->data) {
5792 #ifdef INVARIANTS
5793 					panic("control->data not null at read eor?");
5794 #else
5795 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5796 					sctp_m_freem(control->data);
5797 					control->data = NULL;
5798 #endif
5799 				}
5800 		done_with_control:
5801 				if (TAILQ_NEXT(control, next) == NULL) {
5802 					/*
5803 					 * If we don't have a next we need a
5804 					 * lock, if there is a next
5805 					 * interrupt is filling ahead of us
5806 					 * and we don't need a lock to
5807 					 * remove this guy (which is the
5808 					 * head of the queue).
5809 					 */
5810 					if (hold_rlock == 0) {
5811 						SCTP_INP_READ_LOCK(inp);
5812 						hold_rlock = 1;
5813 					}
5814 				}
5815 				TAILQ_REMOVE(&inp->read_queue, control, next);
				/* Add back any hidden data */
5817 				if (control->held_length) {
5818 					held_length = 0;
5819 					control->held_length = 0;
5820 					wakeup_read_socket = 1;
5821 				}
5822 				if (control->aux_data) {
5823 					sctp_m_free(control->aux_data);
5824 					control->aux_data = NULL;
5825 				}
5826 				no_rcv_needed = control->do_not_ref_stcb;
5827 				sctp_free_remote_addr(control->whoFrom);
5828 				control->data = NULL;
5829 				sctp_free_a_readq(stcb, control);
5830 				control = NULL;
5831 				if ((freed_so_far >= rwnd_req) &&
5832 				    (no_rcv_needed == 0))
5833 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5834 
5835 			} else {
5836 				/*
5837 				 * The user did not read all of this
5838 				 * message, turn off the returned MSG_EOR
5839 				 * since we are leaving more behind on the
5840 				 * control to read.
5841 				 */
5842 #ifdef INVARIANTS
5843 				if (control->end_added &&
5844 				    (control->data == NULL) &&
5845 				    (control->tail_mbuf == NULL)) {
5846 					panic("Gak, control->length is corrupt?");
5847 				}
5848 #endif
5849 				no_rcv_needed = control->do_not_ref_stcb;
5850 				out_flags &= ~MSG_EOR;
5851 			}
5852 		}
5853 		if (out_flags & MSG_EOR) {
5854 			goto release;
5855 		}
5856 		if ((uio->uio_resid == 0) ||
5857 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5858 		    ) {
5859 			goto release;
5860 		}
5861 		/*
5862 		 * If I hit here the receiver wants more and this message is
5863 		 * NOT done (pd-api). So two questions. Can we block? if not
5864 		 * we are done. Did the user NOT set MSG_WAITALL?
5865 		 */
5866 		if (block_allowed == 0) {
5867 			goto release;
5868 		}
5869 		/*
5870 		 * We need to wait for more data a few things: - We don't
5871 		 * sbunlock() so we don't get someone else reading. - We
5872 		 * must be sure to account for the case where what is added
5873 		 * is NOT to our control when we wakeup.
5874 		 */
5875 
5876 		/*
5877 		 * Do we need to tell the transport a rwnd update might be
5878 		 * needed before we go to sleep?
5879 		 */
5880 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5881 		    ((freed_so_far >= rwnd_req) &&
5882 		    (control->do_not_ref_stcb == 0) &&
5883 		    (no_rcv_needed == 0))) {
5884 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5885 		}
5886 wait_some_more:
5887 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5888 			goto release;
5889 		}
5890 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5891 			goto release;
5892 
5893 		if (hold_rlock == 1) {
5894 			SCTP_INP_READ_UNLOCK(inp);
5895 			hold_rlock = 0;
5896 		}
5897 		if (hold_sblock == 0) {
5898 			SOCKBUF_LOCK(&so->so_rcv);
5899 			hold_sblock = 1;
5900 		}
5901 		if ((copied_so_far) && (control->length == 0) &&
5902 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
5903 		    ) {
5904 			goto release;
5905 		}
5906 		if (so->so_rcv.sb_cc <= control->held_length) {
5907 			error = sbwait(&so->so_rcv);
5908 			if (error) {
5909 				goto release;
5910 			}
5911 			control->held_length = 0;
5912 		}
5913 		if (hold_sblock) {
5914 			SOCKBUF_UNLOCK(&so->so_rcv);
5915 			hold_sblock = 0;
5916 		}
5917 		if (control->length == 0) {
5918 			/* still nothing here */
5919 			if (control->end_added == 1) {
5920 				/* he aborted, or is done i.e.did a shutdown */
5921 				out_flags |= MSG_EOR;
5922 				if (control->pdapi_aborted) {
5923 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5924 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5925 
5926 					out_flags |= MSG_TRUNC;
5927 				} else {
5928 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5929 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5930 				}
5931 				goto done_with_control;
5932 			}
5933 			if (so->so_rcv.sb_cc > held_length) {
5934 				control->held_length = so->so_rcv.sb_cc;
5935 				held_length = 0;
5936 			}
5937 			goto wait_some_more;
5938 		} else if (control->data == NULL) {
5939 			/*
5940 			 * we must re-sync since data is probably being
5941 			 * added
5942 			 */
5943 			SCTP_INP_READ_LOCK(inp);
5944 			if ((control->length > 0) && (control->data == NULL)) {
5945 				/*
5946 				 * big trouble.. we have the lock and its
5947 				 * corrupt?
5948 				 */
5949 #ifdef INVARIANTS
5950 				panic("Impossible data==NULL length !=0");
5951 #endif
5952 				out_flags |= MSG_EOR;
5953 				out_flags |= MSG_TRUNC;
5954 				control->length = 0;
5955 				SCTP_INP_READ_UNLOCK(inp);
5956 				goto done_with_control;
5957 			}
5958 			SCTP_INP_READ_UNLOCK(inp);
5959 			/* We will fall around to get more data */
5960 		}
5961 		goto get_more_data;
5962 	} else {
5963 		/*-
5964 		 * Give caller back the mbuf chain,
5965 		 * store in uio_resid the length
5966 		 */
5967 		wakeup_read_socket = 0;
5968 		if ((control->end_added == 0) ||
5969 		    (TAILQ_NEXT(control, next) == NULL)) {
5970 			/* Need to get rlock */
5971 			if (hold_rlock == 0) {
5972 				SCTP_INP_READ_LOCK(inp);
5973 				hold_rlock = 1;
5974 			}
5975 		}
5976 		if (control->end_added) {
5977 			out_flags |= MSG_EOR;
5978 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5979 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5980 		}
5981 		if (control->spec_flags & M_NOTIFICATION) {
5982 			out_flags |= MSG_NOTIFICATION;
5983 		}
5984 		uio->uio_resid = control->length;
5985 		*mp = control->data;
5986 		m = control->data;
5987 		while (m) {
5988 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5989 				sctp_sblog(&so->so_rcv,
5990 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5991 			}
5992 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5993 			freed_so_far += SCTP_BUF_LEN(m);
5994 			freed_so_far += MSIZE;
5995 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5996 				sctp_sblog(&so->so_rcv,
5997 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5998 			}
5999 			m = SCTP_BUF_NEXT(m);
6000 		}
6001 		control->data = control->tail_mbuf = NULL;
6002 		control->length = 0;
6003 		if (out_flags & MSG_EOR) {
6004 			/* Done with this control */
6005 			goto done_with_control;
6006 		}
6007 	}
6008 release:
6009 	if (hold_rlock == 1) {
6010 		SCTP_INP_READ_UNLOCK(inp);
6011 		hold_rlock = 0;
6012 	}
6013 	if (hold_sblock == 1) {
6014 		SOCKBUF_UNLOCK(&so->so_rcv);
6015 		hold_sblock = 0;
6016 	}
6017 	sbunlock(&so->so_rcv);
6018 	sockbuf_lock = 0;
6019 
6020 release_unlocked:
6021 	if (hold_sblock) {
6022 		SOCKBUF_UNLOCK(&so->so_rcv);
6023 		hold_sblock = 0;
6024 	}
6025 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6026 		if ((freed_so_far >= rwnd_req) &&
6027 		    (control && (control->do_not_ref_stcb == 0)) &&
6028 		    (no_rcv_needed == 0))
6029 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6030 	}
6031 out:
6032 	if (msg_flags) {
6033 		*msg_flags = out_flags;
6034 	}
6035 	if (((out_flags & MSG_EOR) == 0) &&
6036 	    ((in_flags & MSG_PEEK) == 0) &&
6037 	    (sinfo) &&
6038 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
6039 		struct sctp_extrcvinfo *s_extra;
6040 
6041 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6042 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6043 	}
6044 	if (hold_rlock == 1) {
6045 		SCTP_INP_READ_UNLOCK(inp);
6046 		hold_rlock = 0;
6047 	}
6048 	if (hold_sblock) {
6049 		SOCKBUF_UNLOCK(&so->so_rcv);
6050 		hold_sblock = 0;
6051 	}
6052 	if (sockbuf_lock) {
6053 		sbunlock(&so->so_rcv);
6054 	}
6055 	if (freecnt_applied) {
6056 		/*
6057 		 * The lock on the socket buffer protects us so the free
6058 		 * code will stop. But since we used the socketbuf lock and
6059 		 * the sender uses the tcb_lock to increment, we need to use
6060 		 * the atomic add to the refcnt.
6061 		 */
6062 		if (stcb == NULL) {
6063 #ifdef INVARIANTS
6064 			panic("stcb for refcnt has gone NULL?");
6065 			goto stage_left;
6066 #else
6067 			goto stage_left;
6068 #endif
6069 		}
6070 		atomic_add_int(&stcb->asoc.refcnt, -1);
6071 		freecnt_applied = 0;
6072 		/* Save the value back for next time */
6073 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6074 	}
6075 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6076 		if (stcb) {
6077 			sctp_misc_ints(SCTP_SORECV_DONE,
6078 			    freed_so_far,
6079 			    ((uio) ? (slen - uio->uio_resid) : slen),
6080 			    stcb->asoc.my_rwnd,
6081 			    so->so_rcv.sb_cc);
6082 		} else {
6083 			sctp_misc_ints(SCTP_SORECV_DONE,
6084 			    freed_so_far,
6085 			    ((uio) ? (slen - uio->uio_resid) : slen),
6086 			    0,
6087 			    so->so_rcv.sb_cc);
6088 		}
6089 	}
6090 stage_left:
6091 	if (wakeup_read_socket) {
6092 		sctp_sorwakeup(inp, so);
6093 	}
6094 	return (error);
6095 }
6096 
6097 
6098 #ifdef SCTP_MBUF_LOGGING
6099 struct mbuf *
6100 sctp_m_free(struct mbuf *m)
6101 {
6102 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6103 		if (SCTP_BUF_IS_EXTENDED(m)) {
6104 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6105 		}
6106 	}
6107 	return (m_free(m));
6108 }
6109 
6110 void
6111 sctp_m_freem(struct mbuf *mb)
6112 {
6113 	while (mb != NULL)
6114 		mb = sctp_m_free(mb);
6115 }
6116 
6117 #endif
6118 
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 *
	 * Returns 0 on success; EADDRNOTAVAIL if the address is not a
	 * local interface address in the given VRF; ENOMEM if the work
	 * item cannot be allocated.  The actual per-association work is
	 * done asynchronously by the address work-queue iterator.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and init the wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* hold a reference on the ifa for the work item; the iterator drops it */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_IPI_ITERATOR_WQ_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6165 
6166 
6167 int
6168 sctp_soreceive(struct socket *so,
6169     struct sockaddr **psa,
6170     struct uio *uio,
6171     struct mbuf **mp0,
6172     struct mbuf **controlp,
6173     int *flagsp)
6174 {
6175 	int error, fromlen;
6176 	uint8_t sockbuf[256];
6177 	struct sockaddr *from;
6178 	struct sctp_extrcvinfo sinfo;
6179 	int filling_sinfo = 1;
6180 	struct sctp_inpcb *inp;
6181 
6182 	inp = (struct sctp_inpcb *)so->so_pcb;
6183 	/* pickup the assoc we are reading from */
6184 	if (inp == NULL) {
6185 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6186 		return (EINVAL);
6187 	}
6188 	if ((sctp_is_feature_off(inp,
6189 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6190 	    (controlp == NULL)) {
6191 		/* user does not want the sndrcv ctl */
6192 		filling_sinfo = 0;
6193 	}
6194 	if (psa) {
6195 		from = (struct sockaddr *)sockbuf;
6196 		fromlen = sizeof(sockbuf);
6197 		from->sa_len = 0;
6198 	} else {
6199 		from = NULL;
6200 		fromlen = 0;
6201 	}
6202 
6203 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6204 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6205 	if ((controlp) && (filling_sinfo)) {
6206 		/* copy back the sinfo in a CMSG format */
6207 		if (filling_sinfo)
6208 			*controlp = sctp_build_ctl_nchunk(inp,
6209 			    (struct sctp_sndrcvinfo *)&sinfo);
6210 		else
6211 			*controlp = NULL;
6212 	}
6213 	if (psa) {
6214 		/* copy back the address info */
6215 		if (from && from->sa_len) {
6216 			*psa = sodupsockaddr(from, M_NOWAIT);
6217 		} else {
6218 			*psa = NULL;
6219 		}
6220 	}
6221 	return (error);
6222 }
6223 
6224 
6225 int
6226 sctp_l_soreceive(struct socket *so,
6227     struct sockaddr **name,
6228     struct uio *uio,
6229     char **controlp,
6230     int *controllen,
6231     int *flag)
6232 {
6233 	int error, fromlen;
6234 	uint8_t sockbuf[256];
6235 	struct sockaddr *from;
6236 	struct sctp_extrcvinfo sinfo;
6237 	int filling_sinfo = 1;
6238 	struct sctp_inpcb *inp;
6239 
6240 	inp = (struct sctp_inpcb *)so->so_pcb;
6241 	/* pickup the assoc we are reading from */
6242 	if (inp == NULL) {
6243 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6244 		return (EINVAL);
6245 	}
6246 	if ((sctp_is_feature_off(inp,
6247 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6248 	    (controlp == NULL)) {
6249 		/* user does not want the sndrcv ctl */
6250 		filling_sinfo = 0;
6251 	}
6252 	if (name) {
6253 		from = (struct sockaddr *)sockbuf;
6254 		fromlen = sizeof(sockbuf);
6255 		from->sa_len = 0;
6256 	} else {
6257 		from = NULL;
6258 		fromlen = 0;
6259 	}
6260 
6261 	error = sctp_sorecvmsg(so, uio,
6262 	    (struct mbuf **)NULL,
6263 	    from, fromlen, flag,
6264 	    (struct sctp_sndrcvinfo *)&sinfo,
6265 	    filling_sinfo);
6266 	if ((controlp) && (filling_sinfo)) {
6267 		/*
6268 		 * copy back the sinfo in a CMSG format note that the caller
6269 		 * has reponsibility for freeing the memory.
6270 		 */
6271 		if (filling_sinfo)
6272 			*controlp = sctp_build_ctl_cchunk(inp,
6273 			    controllen,
6274 			    (struct sctp_sndrcvinfo *)&sinfo);
6275 	}
6276 	if (name) {
6277 		/* copy back the address info */
6278 		if (from && from->sa_len) {
6279 			*name = sodupsockaddr(from, M_WAIT);
6280 		} else {
6281 			*name = NULL;
6282 		}
6283 	}
6284 	return (error);
6285 }
6286 
6287 
6288 
6289 
6290 
6291 
6292 
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add up to totaddr packed sockaddrs (starting at addr) as remote
	 * addresses of the association.  Returns the number of addresses
	 * added.  On failure the association has been freed by
	 * sctp_free_assoc(), *error is set to ENOBUFS, and the caller
	 * must not touch stcb again.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/*
		 * NOTE(review): an unrecognized sa_family leaves incr at
		 * its previous value (0 on the first iteration), so sa may
		 * not advance.  Callers apparently pass only lists already
		 * validated as AF_INET/AF_INET6 (see
		 * sctp_connectx_helper_find) — confirm before relying on
		 * this with unvalidated input.
		 */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6333 
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    int *totaddr, int *num_v4, int *num_v6, int *error,
    int limit, int *bad_addr)
{
	/*
	 * Validate and count a packed sockaddr list for connectx().
	 * On return, *num_v4/*num_v6 hold the per-family counts and
	 * *totaddr may be trimmed to the number of entries actually
	 * examined.  If any address already maps to an association on
	 * this endpoint, that association is returned (note: with the
	 * inp reference still held for the caller); otherwise NULL.
	 * A malformed address sets *error = EINVAL and *bad_addr = 1.
	 */
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	size_t incr, at, i;

	at = incr = 0;
	sa = addr;
	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < (size_t)*totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			(*num_v4) += 1;
			incr = sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
		} else if (sa->sa_family == AF_INET6) {
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
				/* Must be non-mapped for connectx */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			(*num_v6) += 1;
			incr = sizeof(struct sockaddr_in6);
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
		} else {
			/* unknown family terminates the scan */
			*totaddr = i;
			/* we are done */
			break;
		}
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* Already have or am bring up an association */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		/*
		 * NOTE(review): 'at' is initialized to 0 but never
		 * advanced, so this check only compares one entry's size
		 * against limit rather than the running offset — confirm
		 * whether an 'at += incr' was intended here.
		 */
		if ((at + incr) > (size_t)limit) {
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}
6397 
6398 /*
6399  * sctp_bindx(ADD) for one address.
6400  * assumes all arguments are valid/checked by caller.
6401  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * Bind one additional address to a subset-bound endpoint.
	 * Rejects bound-all endpoints, validates family/length against
	 * the socket's address-family flags, unmaps v4-mapped v6
	 * addresses, and either performs the initial bind (unbound
	 * endpoint) or queues an ADD-IP address-management request.
	 * Failures are reported through *error.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the mapped address to plain v4 and bind that */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* first bind on an unbound endpoint: do a regular bind */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* address free: queue the ADD-IP request */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* some other endpoint owns this address/port */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6523 
6524 /*
6525  * sctp_bindx(DELETE) for one address.
6526  * assumes all arguments are valid/checked by caller.
6527  */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * Remove one address from a subset-bound endpoint.  Mirrors
	 * sctp_bindx_add_address(): rejects bound-all endpoints,
	 * validates family/length, unmaps v4-mapped v6 addresses, then
	 * queues a DEL-IP address-management request.  Failures are
	 * reported through *error.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the mapped address to plain v4 and delete that */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6606 
6607 /*
6608  * returns the valid local address count for an assoc, taking into account
6609  * all scoping rules
6610  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the local addresses usable by this association, applying
	 * the association's scope rules (loopback, private-v4, link-local,
	 * site-local) and the endpoint's address-family restrictions.
	 * Called with no address lock held; takes SCTP_IPI_ADDR_RLOCK.
	 */
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			/* dual-stack v6 socket also accepts v4 */
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link-local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6742 
6743 #if defined(SCTP_LOCAL_TRACE_BUF)
6744 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Record one entry in the circular kernel trace buffer.  A slot
	 * is reserved lock-free with a compare-and-swap loop on the
	 * shared index, then filled in; concurrent writers each get a
	 * distinct slot.
	 */
	uint32_t saveindex, newindex;

	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			/* wrap: next writer uses slot 1 (this one takes 0 below) */
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6770 
6771 #endif
/*
 * UDP tunneling support: bind a kernel UDP socket to the configured
 * tunneling port and feed received, encapsulated packets into the
 * SCTP input path.
 */
6777 #include <netinet/udp.h>
6778 #include <netinet/udp_var.h>
6779 #include <sys/proc.h>
6780 #ifdef INET6
6781 #include <netinet6/sctp6_var.h>
6782 #endif
6783 
6784 static void
6785 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6786 {
6787 	struct ip *iph;
6788 	struct mbuf *sp, *last;
6789 	struct udphdr *uhdr;
6790 	uint16_t port = 0, len;
6791 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6792 
6793 	/*
6794 	 * Split out the mbuf chain. Leave the IP header in m, place the
6795 	 * rest in the sp.
6796 	 */
6797 	if ((m->m_flags & M_PKTHDR) == 0) {
6798 		/* Can't handle one that is not a pkt hdr */
6799 		goto out;
6800 	}
6801 	/* pull the src port */
6802 	iph = mtod(m, struct ip *);
6803 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6804 
6805 	port = uhdr->uh_sport;
6806 	sp = m_split(m, off, M_DONTWAIT);
6807 	if (sp == NULL) {
6808 		/* Gak, drop packet, we can't do a split */
6809 		goto out;
6810 	}
6811 	if (sp->m_pkthdr.len < header_size) {
6812 		/* Gak, packet can't have an SCTP header in it - to small */
6813 		m_freem(sp);
6814 		goto out;
6815 	}
6816 	/* ok now pull up the UDP header and SCTP header together */
6817 	sp = m_pullup(sp, header_size);
6818 	if (sp == NULL) {
6819 		/* Gak pullup failed */
6820 		goto out;
6821 	}
6822 	/* trim out the UDP header */
6823 	m_adj(sp, sizeof(struct udphdr));
6824 
6825 	/* Now reconstruct the mbuf chain */
6826 	/* 1) find last one */
6827 	last = m;
6828 	while (last->m_next != NULL) {
6829 		last = last->m_next;
6830 	}
6831 	last->m_next = sp;
6832 	m->m_pkthdr.len += sp->m_pkthdr.len;
6833 	last = m;
6834 	while (last != NULL) {
6835 		last = last->m_next;
6836 	}
6837 	/* Now its ready for sctp_input or sctp6_input */
6838 	iph = mtod(m, struct ip *);
6839 	switch (iph->ip_v) {
6840 	case IPVERSION:
6841 		{
6842 			/* its IPv4 */
6843 			len = SCTP_GET_IPV4_LENGTH(iph);
6844 			len -= sizeof(struct udphdr);
6845 			SCTP_GET_IPV4_LENGTH(iph) = len;
6846 			sctp_input_with_port(m, off, port);
6847 			break;
6848 		}
6849 #ifdef INET6
6850 	case IPV6_VERSION >> 4:
6851 		{
6852 			/* its IPv6 - NOT supported */
6853 			goto out;
6854 			break;
6855 
6856 		}
6857 #endif
6858 	default:
6859 		{
6860 			m_freem(m);
6861 			break;
6862 		}
6863 	}
6864 	return;
6865 out:
6866 	m_freem(m);
6867 }
6868 
6869 void
6870 sctp_over_udp_stop(void)
6871 {
6872 	struct socket *sop;
6873 
6874 	/*
6875 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6876 	 * for writting!
6877 	 */
6878 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6879 		/* Nothing to do */
6880 		return;
6881 	}
6882 	sop = SCTP_BASE_INFO(udp_tun_socket);
6883 	soclose(sop);
6884 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6885 }
int
sctp_over_udp_start(void)
{
	/*
	 * Open a kernel UDP socket bound to the configured tunneling
	 * port and install sctp_recv_udp_tunneled_packet() as its input
	 * hook.  Returns 0 on success or an errno value; on any failure
	 * after socket creation the socket is torn down again via
	 * sctp_over_udp_stop().
	 */
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writting!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up, we can't get the port */
exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_upd_tunneled_packet().
	 */
	return (0);
}
6939