xref: /freebsd/sys/netinet/sctputil.c (revision 50c9ba2c76d9f1b41c0f007beaebd3d8ce112a59)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #endif
45 #include <netinet/sctp_header.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_uio.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_bsd_addr.h>
53 
54 
55 #ifndef KTR_SCTP
56 #define KTR_SCTP KTR_SUBSYS
57 #endif
58 
59 extern struct sctp_cc_functions sctp_cc_functions[];
60 extern struct sctp_ss_functions sctp_ss_functions[];
61 
62 void
63 sctp_sblog(struct sockbuf *sb,
64     struct sctp_tcb *stcb, int from, int incr)
65 {
66 	struct sctp_cwnd_log sctp_clog;
67 
68 	sctp_clog.x.sb.stcb = stcb;
69 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
70 	if (stcb)
71 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
72 	else
73 		sctp_clog.x.sb.stcb_sbcc = 0;
74 	sctp_clog.x.sb.incr = incr;
75 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
76 	    SCTP_LOG_EVENT_SB,
77 	    from,
78 	    sctp_clog.x.misc.log1,
79 	    sctp_clog.x.misc.log2,
80 	    sctp_clog.x.misc.log3,
81 	    sctp_clog.x.misc.log4);
82 }
83 
84 void
85 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
86 {
87 	struct sctp_cwnd_log sctp_clog;
88 
89 	sctp_clog.x.close.inp = (void *)inp;
90 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
91 	if (stcb) {
92 		sctp_clog.x.close.stcb = (void *)stcb;
93 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
94 	} else {
95 		sctp_clog.x.close.stcb = 0;
96 		sctp_clog.x.close.state = 0;
97 	}
98 	sctp_clog.x.close.loc = loc;
99 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
100 	    SCTP_LOG_EVENT_CLOSE,
101 	    0,
102 	    sctp_clog.x.misc.log1,
103 	    sctp_clog.x.misc.log2,
104 	    sctp_clog.x.misc.log3,
105 	    sctp_clog.x.misc.log4);
106 }
107 
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 }
125 
126 void
127 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
128 {
129 	struct sctp_cwnd_log sctp_clog;
130 
131 	sctp_clog.x.strlog.stcb = stcb;
132 	sctp_clog.x.strlog.n_tsn = tsn;
133 	sctp_clog.x.strlog.n_sseq = sseq;
134 	sctp_clog.x.strlog.e_tsn = 0;
135 	sctp_clog.x.strlog.e_sseq = 0;
136 	sctp_clog.x.strlog.strm = stream;
137 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
138 	    SCTP_LOG_EVENT_STRM,
139 	    from,
140 	    sctp_clog.x.misc.log1,
141 	    sctp_clog.x.misc.log2,
142 	    sctp_clog.x.misc.log3,
143 	    sctp_clog.x.misc.log4);
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
204     int from)
205 {
206 	struct sctp_cwnd_log sctp_clog;
207 
208 	memset(&sctp_clog, 0, sizeof(sctp_clog));
209 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
210 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
211 	sctp_clog.x.fr.tsn = tsn;
212 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
213 	    SCTP_LOG_EVENT_FR,
214 	    from,
215 	    sctp_clog.x.misc.log1,
216 	    sctp_clog.x.misc.log2,
217 	    sctp_clog.x.misc.log3,
218 	    sctp_clog.x.misc.log4);
219 }
220 
221 void
222 sctp_log_mb(struct mbuf *m, int from)
223 {
224 	struct sctp_cwnd_log sctp_clog;
225 
226 	sctp_clog.x.mb.mp = m;
227 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
228 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
229 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
230 	if (SCTP_BUF_IS_EXTENDED(m)) {
231 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
232 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
233 	} else {
234 		sctp_clog.x.mb.ext = 0;
235 		sctp_clog.x.mb.refcnt = 0;
236 	}
237 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
238 	    SCTP_LOG_EVENT_MBUF,
239 	    from,
240 	    sctp_clog.x.misc.log1,
241 	    sctp_clog.x.misc.log2,
242 	    sctp_clog.x.misc.log3,
243 	    sctp_clog.x.misc.log4);
244 }
245 
246 void
247 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
248     int from)
249 {
250 	struct sctp_cwnd_log sctp_clog;
251 
252 	if (control == NULL) {
253 		SCTP_PRINTF("Gak log of NULL?\n");
254 		return;
255 	}
256 	sctp_clog.x.strlog.stcb = control->stcb;
257 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
258 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
259 	sctp_clog.x.strlog.strm = control->sinfo_stream;
260 	if (poschk != NULL) {
261 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
262 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
263 	} else {
264 		sctp_clog.x.strlog.e_tsn = 0;
265 		sctp_clog.x.strlog.e_sseq = 0;
266 	}
267 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
268 	    SCTP_LOG_EVENT_STRM,
269 	    from,
270 	    sctp_clog.x.misc.log1,
271 	    sctp_clog.x.misc.log2,
272 	    sctp_clog.x.misc.log3,
273 	    sctp_clog.x.misc.log4);
274 }
275 
276 void
277 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
278 {
279 	struct sctp_cwnd_log sctp_clog;
280 
281 	sctp_clog.x.cwnd.net = net;
282 	if (stcb->asoc.send_queue_cnt > 255)
283 		sctp_clog.x.cwnd.cnt_in_send = 255;
284 	else
285 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
286 	if (stcb->asoc.stream_queue_cnt > 255)
287 		sctp_clog.x.cwnd.cnt_in_str = 255;
288 	else
289 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
290 
291 	if (net) {
292 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
293 		sctp_clog.x.cwnd.inflight = net->flight_size;
294 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
295 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
296 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
297 	}
298 	if (SCTP_CWNDLOG_PRESEND == from) {
299 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
300 	}
301 	sctp_clog.x.cwnd.cwnd_augment = augment;
302 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
303 	    SCTP_LOG_EVENT_CWND,
304 	    from,
305 	    sctp_clog.x.misc.log1,
306 	    sctp_clog.x.misc.log2,
307 	    sctp_clog.x.misc.log3,
308 	    sctp_clog.x.misc.log4);
309 }
310 
/*
 * Snapshot which SCTP-related locks are currently held (as reported by
 * mtx_owned()/rw_wowned()) and record the result in the trace buffer.
 * Either 'inp' or 'stcb' may be NULL; states that cannot be sampled
 * are logged as SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Global endpoint-info lock: sampled for write ownership. */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx — presumably the socket lock aliases the
		 * receive-buffer lock here; confirm this is intended.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
354 
/*
 * Trace a max-burst event for 'net': the triggering error code, the
 * applied burst limit and the association's queue depths (clamped to
 * one byte each).
 * NOTE(review): unlike sctp_log_cwnd(), 'net' is dereferenced without
 * a NULL check — callers must pass a valid destination.
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	/* The cwnd slot is reused to carry the error code. */
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
381 
382 void
383 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
384 {
385 	struct sctp_cwnd_log sctp_clog;
386 
387 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
388 	sctp_clog.x.rwnd.send_size = snd_size;
389 	sctp_clog.x.rwnd.overhead = overhead;
390 	sctp_clog.x.rwnd.new_rwnd = 0;
391 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
392 	    SCTP_LOG_EVENT_RWND,
393 	    from,
394 	    sctp_clog.x.misc.log1,
395 	    sctp_clog.x.misc.log2,
396 	    sctp_clog.x.misc.log3,
397 	    sctp_clog.x.misc.log4);
398 }
399 
400 void
401 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
402 {
403 	struct sctp_cwnd_log sctp_clog;
404 
405 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
406 	sctp_clog.x.rwnd.send_size = flight_size;
407 	sctp_clog.x.rwnd.overhead = overhead;
408 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
409 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
410 	    SCTP_LOG_EVENT_RWND,
411 	    from,
412 	    sctp_clog.x.misc.log1,
413 	    sctp_clog.x.misc.log2,
414 	    sctp_clog.x.misc.log3,
415 	    sctp_clog.x.misc.log4);
416 }
417 
418 void
419 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
420 {
421 	struct sctp_cwnd_log sctp_clog;
422 
423 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
424 	sctp_clog.x.mbcnt.size_change = book;
425 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
426 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
427 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
428 	    SCTP_LOG_EVENT_MBCNT,
429 	    from,
430 	    sctp_clog.x.misc.log1,
431 	    sctp_clog.x.misc.log2,
432 	    sctp_clog.x.misc.log3,
433 	    sctp_clog.x.misc.log4);
434 }
435 
/*
 * Log four caller-supplied 32-bit values as a generic "misc" trace
 * event tagged with the call-site code 'from'.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
444 
445 void
446 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
447 {
448 	struct sctp_cwnd_log sctp_clog;
449 
450 	sctp_clog.x.wake.stcb = (void *)stcb;
451 	sctp_clog.x.wake.wake_cnt = wake_cnt;
452 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
453 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
454 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
455 
456 	if (stcb->asoc.stream_queue_cnt < 0xff)
457 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
458 	else
459 		sctp_clog.x.wake.stream_qcnt = 0xff;
460 
461 	if (stcb->asoc.chunks_on_out_queue < 0xff)
462 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
463 	else
464 		sctp_clog.x.wake.chunks_on_oque = 0xff;
465 
466 	sctp_clog.x.wake.sctpflags = 0;
467 	/* set in the defered mode stuff */
468 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
469 		sctp_clog.x.wake.sctpflags |= 1;
470 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
471 		sctp_clog.x.wake.sctpflags |= 2;
472 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
473 		sctp_clog.x.wake.sctpflags |= 4;
474 	/* what about the sb */
475 	if (stcb->sctp_socket) {
476 		struct socket *so = stcb->sctp_socket;
477 
478 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
479 	} else {
480 		sctp_clog.x.wake.sbflags = 0xff;
481 	}
482 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
483 	    SCTP_LOG_EVENT_WAKE,
484 	    from,
485 	    sctp_clog.x.misc.log1,
486 	    sctp_clog.x.misc.log2,
487 	    sctp_clog.x.misc.log3,
488 	    sctp_clog.x.misc.log4);
489 }
490 
491 void
492 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
493 {
494 	struct sctp_cwnd_log sctp_clog;
495 
496 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
497 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
498 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
499 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
500 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
501 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
502 	sctp_clog.x.blk.sndlen = sendlen;
503 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
504 	    SCTP_LOG_EVENT_BLOCK,
505 	    from,
506 	    sctp_clog.x.misc.log1,
507 	    sctp_clog.x.misc.log2,
508 	    sctp_clog.x.misc.log3,
509 	    sctp_clog.x.misc.log4);
510 }
511 
/*
 * Stub for the stat-log socket option: nothing is copied out because
 * the trace data is retrieved via the kernel trace facility instead.
 * Always returns 0 (success).
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
518 
519 #ifdef SCTP_AUDITING_ENABLED
/* Ring buffer of (event, detail) byte pairs recorded by sctp_audit_log(). */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next write slot in sctp_audit_data; wraps to 0 at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
522 
523 static
524 void
525 sctp_print_audit_report(void)
526 {
527 	int i;
528 	int cnt;
529 
530 	cnt = 0;
531 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
532 		if ((sctp_audit_data[i][0] == 0xe0) &&
533 		    (sctp_audit_data[i][1] == 0x01)) {
534 			cnt = 0;
535 			SCTP_PRINTF("\n");
536 		} else if (sctp_audit_data[i][0] == 0xf0) {
537 			cnt = 0;
538 			SCTP_PRINTF("\n");
539 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
540 		    (sctp_audit_data[i][1] == 0x01)) {
541 			SCTP_PRINTF("\n");
542 			cnt = 0;
543 		}
544 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
545 		    (uint32_t) sctp_audit_data[i][1]);
546 		cnt++;
547 		if ((cnt % 14) == 0)
548 			SCTP_PRINTF("\n");
549 	}
550 	for (i = 0; i < sctp_audit_indx; i++) {
551 		if ((sctp_audit_data[i][0] == 0xe0) &&
552 		    (sctp_audit_data[i][1] == 0x01)) {
553 			cnt = 0;
554 			SCTP_PRINTF("\n");
555 		} else if (sctp_audit_data[i][0] == 0xf0) {
556 			cnt = 0;
557 			SCTP_PRINTF("\n");
558 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
559 		    (sctp_audit_data[i][1] == 0x01)) {
560 			SCTP_PRINTF("\n");
561 			cnt = 0;
562 		}
563 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
564 		    (uint32_t) sctp_audit_data[i][1]);
565 		cnt++;
566 		if ((cnt % 14) == 0)
567 			SCTP_PRINTF("\n");
568 	}
569 	SCTP_PRINTF("\n");
570 }
571 
/*
 * Consistency audit of an association's retransmission and flight-size
 * accounting.  Progress and mismatch markers are written into the
 * audit ring; when a mismatch is found it is printed, the counter in
 * question is repaired from the recount, and the full audit report is
 * dumped at the end.  'from' tags the call site in the ring.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA: entered the auditor; low byte records the caller id. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	/* 0xAF/0x01: bailing out, no endpoint supplied. */
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xAF/0x02: bailing out, no association supplied. */
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: current retransmit count (low byte) before recount. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Recount, from the sent queue itself: chunks marked for
	 * retransmission versus bytes/chunks still counted in flight.
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	/* 0xAF/0xA1: retransmit count mismatch — report and repair. */
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		/* 0xA2: the corrected retransmit count. */
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	/* 0xAF/0xA2: total_flight byte count mismatch — repair. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	/* 0xAF/0xA5: in-flight chunk count mismatch — repair. */
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	/* Sum the per-destination flight sizes. */
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	/* 0xAF/0xA3: per-net flight totals disagree with total_flight. */
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			/* Rebuild each destination's flight from the queue. */
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
701 
702 void
703 sctp_audit_log(uint8_t ev, uint8_t fd)
704 {
705 
706 	sctp_audit_data[sctp_audit_indx][0] = ev;
707 	sctp_audit_data[sctp_audit_indx][1] = fd;
708 	sctp_audit_indx++;
709 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
710 		sctp_audit_indx = 0;
711 	}
712 }
713 
714 #endif
715 
716 /*
717  * sctp_stop_timers_for_shutdown() should be called
718  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
719  * state to make sure that all timers are stopped.
720  */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	/* Association-wide timers. */
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	/* Per-destination path-MTU and heartbeat timers. */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
	}
}
739 
/*
 * Table of common link MTU values, in ascending order; consulted only
 * when the next hop does not report a usable size.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest MTU smaller than val. If there is no
 * entry, just return val.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	int idx;

	/* Scan from the largest entry down to the first one below val. */
	for (idx = (int)(sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0])) - 1;
	    idx >= 0; idx--) {
		if (sctp_mtu_sizes[idx] < val) {
			return (sctp_mtu_sizes[idx]);
		}
	}
	/* val does not exceed even the smallest table entry. */
	return (val);
}

/*
 * Return the smallest MTU larger than val. If there is no
 * entry, just return val.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	size_t idx, cnt;

	cnt = sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]);
	/* Scan upward to the first entry strictly above val. */
	for (idx = 0; idx < cnt; idx++) {
		if (sctp_mtu_sizes[idx] > val) {
			return (sctp_mtu_sizes[idx]);
		}
	}
	return (val);
}
802 
/*
 * Refill the endpoint's random-number store by hashing the stored seed
 * with a monotonically increasing counter, and restart consumption at
 * the front of the store.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* Bump the counter so the next refill hashes different input. */
	m->random_counter++;
}
821 
/*
 * Draw one 32-bit value from the endpoint's random store, refilling
 * the store when it is exhausted.  In debug mode, hands out sequential
 * values instead so traces are reproducible.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/* Claim a 4-byte slot of the store via lock-free compare-and-set. */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before running off the usable end of the store. */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
859 
860 uint32_t
861 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
862 {
863 	uint32_t x;
864 	struct timeval now;
865 
866 	if (check) {
867 		(void)SCTP_GETTIME_TIMEVAL(&now);
868 	}
869 	for (;;) {
870 		x = sctp_select_initial_TSN(&inp->sctp_ep);
871 		if (x == 0) {
872 			/* we never use 0 */
873 			continue;
874 		}
875 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
876 			break;
877 		}
878 	}
879 	return (x);
880 }
881 
882 int
883 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
884     uint32_t override_tag, uint32_t vrf_id)
885 {
886 	struct sctp_association *asoc;
887 
888 	/*
889 	 * Anything set to zero is taken care of by the allocation routine's
890 	 * bzero
891 	 */
892 
893 	/*
894 	 * Up front select what scoping to apply on addresses I tell my peer
895 	 * Not sure what to do with these right now, we will need to come up
896 	 * with a way to set them. We may need to pass them through from the
897 	 * caller in the sctp_aloc_assoc() function.
898 	 */
899 	int i;
900 
901 	asoc = &stcb->asoc;
902 	/* init all variables to a known value. */
903 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
904 	asoc->max_burst = m->sctp_ep.max_burst;
905 	asoc->fr_max_burst = m->sctp_ep.fr_max_burst;
906 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
907 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
908 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
909 	asoc->ecn_allowed = m->sctp_ecn_enable;
910 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
911 	asoc->sctp_cmt_pf = (uint8_t) 0;
912 	asoc->sctp_frag_point = m->sctp_frag_point;
913 	asoc->sctp_features = m->sctp_features;
914 	asoc->default_dscp = m->sctp_ep.default_dscp;
915 #ifdef INET6
916 	if (m->sctp_ep.default_flowlabel) {
917 		asoc->default_flowlabel = m->sctp_ep.default_flowlabel;
918 	} else {
919 		if (m->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
920 			asoc->default_flowlabel = sctp_select_initial_TSN(&m->sctp_ep);
921 			asoc->default_flowlabel &= 0x000fffff;
922 			asoc->default_flowlabel |= 0x80000000;
923 		} else {
924 			asoc->default_flowlabel = 0;
925 		}
926 	}
927 #endif
928 	asoc->sb_send_resv = 0;
929 	if (override_tag) {
930 		asoc->my_vtag = override_tag;
931 	} else {
932 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
933 	}
934 	/* Get the nonce tags */
935 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
936 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
937 	asoc->vrf_id = vrf_id;
938 
939 #ifdef SCTP_ASOCLOG_OF_TSNS
940 	asoc->tsn_in_at = 0;
941 	asoc->tsn_out_at = 0;
942 	asoc->tsn_in_wrapped = 0;
943 	asoc->tsn_out_wrapped = 0;
944 	asoc->cumack_log_at = 0;
945 	asoc->cumack_log_atsnt = 0;
946 #endif
947 #ifdef SCTP_FS_SPEC_LOG
948 	asoc->fs_index = 0;
949 #endif
950 	asoc->refcnt = 0;
951 	asoc->assoc_up_sent = 0;
952 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
953 	    sctp_select_initial_TSN(&m->sctp_ep);
954 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
955 	/* we are optimisitic here */
956 	asoc->peer_supports_pktdrop = 1;
957 	asoc->peer_supports_nat = 0;
958 	asoc->sent_queue_retran_cnt = 0;
959 
960 	/* for CMT */
961 	asoc->last_net_cmt_send_started = NULL;
962 
963 	/* This will need to be adjusted */
964 	asoc->last_acked_seq = asoc->init_seq_number - 1;
965 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
966 	asoc->asconf_seq_in = asoc->last_acked_seq;
967 
968 	/* here we are different, we hold the next one we expect */
969 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
970 
971 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
972 	asoc->initial_rto = m->sctp_ep.initial_rto;
973 
974 	asoc->max_init_times = m->sctp_ep.max_init_times;
975 	asoc->max_send_times = m->sctp_ep.max_send_times;
976 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
977 	asoc->def_net_pf_threshold = m->sctp_ep.def_net_pf_threshold;
978 	asoc->free_chunk_cnt = 0;
979 
980 	asoc->iam_blocking = 0;
981 	asoc->context = m->sctp_context;
982 	asoc->local_strreset_support = m->local_strreset_support;
983 	asoc->def_send = m->def_send;
984 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
985 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
986 	asoc->pr_sctp_cnt = 0;
987 	asoc->total_output_queue_size = 0;
988 
989 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
990 		struct in6pcb *inp6;
991 
992 		/* Its a V6 socket */
993 		inp6 = (struct in6pcb *)m;
994 		asoc->ipv6_addr_legal = 1;
995 		/* Now look at the binding flag to see if V4 will be legal */
996 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
997 			asoc->ipv4_addr_legal = 1;
998 		} else {
999 			/* V4 addresses are NOT legal on the association */
1000 			asoc->ipv4_addr_legal = 0;
1001 		}
1002 	} else {
1003 		/* Its a V4 socket, no - V6 */
1004 		asoc->ipv4_addr_legal = 1;
1005 		asoc->ipv6_addr_legal = 0;
1006 	}
1007 
1008 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1009 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1010 
1011 	asoc->smallest_mtu = m->sctp_frag_point;
1012 	asoc->minrto = m->sctp_ep.sctp_minrto;
1013 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1014 
1015 	asoc->locked_on_sending = NULL;
1016 	asoc->stream_locked_on = 0;
1017 	asoc->ecn_echo_cnt_onq = 0;
1018 	asoc->stream_locked = 0;
1019 
1020 	asoc->send_sack = 1;
1021 
1022 	LIST_INIT(&asoc->sctp_restricted_addrs);
1023 
1024 	TAILQ_INIT(&asoc->nets);
1025 	TAILQ_INIT(&asoc->pending_reply_queue);
1026 	TAILQ_INIT(&asoc->asconf_ack_sent);
1027 	/* Setup to fill the hb random cache at first HB */
1028 	asoc->hb_random_idx = 4;
1029 
1030 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1031 
1032 	stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
1033 	stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
1034 
1035 	stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
1036 	stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
1037 
1038 	/*
1039 	 * Now the stream parameters, here we allocate space for all streams
1040 	 * that we request by default.
1041 	 */
1042 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1043 	    m->sctp_ep.pre_open_stream_count;
1044 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1045 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1046 	    SCTP_M_STRMO);
1047 	if (asoc->strmout == NULL) {
1048 		/* big trouble no memory */
1049 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1050 		return (ENOMEM);
1051 	}
1052 	for (i = 0; i < asoc->streamoutcnt; i++) {
1053 		/*
1054 		 * inbound side must be set to 0xffff, also NOTE when we get
1055 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1056 		 * count (streamoutcnt) but first check if we sent to any of
1057 		 * the upper streams that were dropped (if some were). Those
1058 		 * that were dropped must be notified to the upper layer as
1059 		 * failed to send.
1060 		 */
1061 		asoc->strmout[i].next_sequence_sent = 0x0;
1062 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1063 		asoc->strmout[i].stream_no = i;
1064 		asoc->strmout[i].last_msg_incomplete = 0;
1065 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1066 	}
1067 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1068 
1069 	/* Now the mapping array */
1070 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1071 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1072 	    SCTP_M_MAP);
1073 	if (asoc->mapping_array == NULL) {
1074 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1075 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1076 		return (ENOMEM);
1077 	}
1078 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1079 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1080 	    SCTP_M_MAP);
1081 	if (asoc->nr_mapping_array == NULL) {
1082 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1083 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1084 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1085 		return (ENOMEM);
1086 	}
1087 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1088 
1089 	/* Now the init of the other outqueues */
1090 	TAILQ_INIT(&asoc->free_chunks);
1091 	TAILQ_INIT(&asoc->control_send_queue);
1092 	TAILQ_INIT(&asoc->asconf_send_queue);
1093 	TAILQ_INIT(&asoc->send_queue);
1094 	TAILQ_INIT(&asoc->sent_queue);
1095 	TAILQ_INIT(&asoc->reasmqueue);
1096 	TAILQ_INIT(&asoc->resetHead);
1097 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1098 	TAILQ_INIT(&asoc->asconf_queue);
1099 	/* authentication fields */
1100 	asoc->authinfo.random = NULL;
1101 	asoc->authinfo.active_keyid = 0;
1102 	asoc->authinfo.assoc_key = NULL;
1103 	asoc->authinfo.assoc_keyid = 0;
1104 	asoc->authinfo.recv_key = NULL;
1105 	asoc->authinfo.recv_keyid = 0;
1106 	LIST_INIT(&asoc->shared_keys);
1107 	asoc->marked_retrans = 0;
1108 	asoc->port = m->sctp_ep.port;
1109 	asoc->timoinit = 0;
1110 	asoc->timodata = 0;
1111 	asoc->timosack = 0;
1112 	asoc->timoshutdown = 0;
1113 	asoc->timoheartbeat = 0;
1114 	asoc->timocookie = 0;
1115 	asoc->timoshutdownack = 0;
1116 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1117 	asoc->discontinuity_time = asoc->start_time;
1118 	/*
1119 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1120 	 * freed later when the association is freed.
1121 	 */
1122 	return (0);
1123 }
1124 
1125 void
1126 sctp_print_mapping_array(struct sctp_association *asoc)
1127 {
1128 	unsigned int i, limit;
1129 
1130 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1131 	    asoc->mapping_array_size,
1132 	    asoc->mapping_array_base_tsn,
1133 	    asoc->cumulative_tsn,
1134 	    asoc->highest_tsn_inside_map,
1135 	    asoc->highest_tsn_inside_nr_map);
1136 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1137 		if (asoc->mapping_array[limit - 1] != 0) {
1138 			break;
1139 		}
1140 	}
1141 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1142 	for (i = 0; i < limit; i++) {
1143 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1144 	}
1145 	if (limit % 16)
1146 		SCTP_PRINTF("\n");
1147 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1148 		if (asoc->nr_mapping_array[limit - 1]) {
1149 			break;
1150 		}
1151 	}
1152 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1153 	for (i = 0; i < limit; i++) {
1154 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1155 	}
1156 	if (limit % 16)
1157 		SCTP_PRINTF("\n");
1158 }
1159 
1160 int
1161 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1162 {
1163 	/* mapping array needs to grow */
1164 	uint8_t *new_array1, *new_array2;
1165 	uint32_t new_size;
1166 
1167 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1168 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1169 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1170 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1171 		/* can't get more, forget it */
1172 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1173 		if (new_array1) {
1174 			SCTP_FREE(new_array1, SCTP_M_MAP);
1175 		}
1176 		if (new_array2) {
1177 			SCTP_FREE(new_array2, SCTP_M_MAP);
1178 		}
1179 		return (-1);
1180 	}
1181 	memset(new_array1, 0, new_size);
1182 	memset(new_array2, 0, new_size);
1183 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1184 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1185 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1186 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1187 	asoc->mapping_array = new_array1;
1188 	asoc->nr_mapping_array = new_array2;
1189 	asoc->mapping_array_size = new_size;
1190 	return (0);
1191 }
1192 
1193 
/*
 * Execute one queued iterator: walk the endpoint list starting at
 * it->inp, and for each endpoint whose pcb flags/features match, run the
 * optional per-endpoint function and then the per-association function
 * on every association in the requested state.  The INP-info read lock
 * and the iterator lock are periodically dropped (every
 * SCTP_ITERATOR_MAX_AT_ONCE associations) so other threads can make
 * progress; refcounts keep the current inp/stcb alive across the gap.
 * When the walk finishes, function_atend is called and the iterator is
 * freed.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* drop the reference taken when the iterator was queued */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* on the first pass it->inp is already read-locked above */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/*
			 * While the locks were dropped, someone may have
			 * asked us to stop; honor those requests here.
			 */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1341 
/*
 * Drain the global iterator work queue, running each queued iterator via
 * sctp_iterator_work().  The iterator-WQ lock is dropped around each
 * run (sctp_iterator_work frees the iterator, hence the FREED_MEMORY
 * annotation below) and re-taken before continuing the walk.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		/* run in the vnet the iterator was created in */
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1365 
1366 
1367 static void
1368 sctp_handle_addr_wq(void)
1369 {
1370 	/* deal with the ADDR wq from the rtsock calls */
1371 	struct sctp_laddr *wi, *nwi;
1372 	struct sctp_asconf_iterator *asc;
1373 
1374 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1375 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1376 	if (asc == NULL) {
1377 		/* Try later, no memory */
1378 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1379 		    (struct sctp_inpcb *)NULL,
1380 		    (struct sctp_tcb *)NULL,
1381 		    (struct sctp_nets *)NULL);
1382 		return;
1383 	}
1384 	LIST_INIT(&asc->list_of_work);
1385 	asc->cnt = 0;
1386 
1387 	SCTP_WQ_ADDR_LOCK();
1388 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1389 		LIST_REMOVE(wi, sctp_nxt_addr);
1390 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1391 		asc->cnt++;
1392 	}
1393 	SCTP_WQ_ADDR_UNLOCK();
1394 
1395 	if (asc->cnt == 0) {
1396 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1397 	} else {
1398 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1399 		    sctp_asconf_iterator_stcb,
1400 		    NULL,	/* No ep end for boundall */
1401 		    SCTP_PCB_FLAGS_BOUNDALL,
1402 		    SCTP_PCB_ANY_FEATURES,
1403 		    SCTP_ASOC_ANY_STATE,
1404 		    (void *)asc, 0,
1405 		    sctp_asconf_iterator_end, NULL, 0);
1406 	}
1407 }
1408 
/*
 * Central callout handler for all SCTP timers.  't' is the sctp_timer
 * embedded in the owning object (endpoint, association, or net).  After
 * a series of staleness/validity checks (each recording a distinct
 * marker in tmr->stopped_from for post-mortem debugging), the handler
 * takes the refcounts/locks it needs, dispatches on the timer type, and
 * finally releases everything via the get_out/out_decr/out_no_decr
 * exits.  Several handlers can free the stcb or inp themselves; those
 * paths jump to out_decr/out_no_decr to avoid touching freed objects.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* only the address-workqueue timer may run without an endpoint */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/* remember the type now; some handlers below clear inp/stcb/tmr */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			/* socket is gone and this timer type needs one */
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* hold the association while we validate it */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* ASOCKILL must run even on a dying association */
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			/* restart the HB timer for this destination */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* rotate the endpoint's cookie secret */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* shutdown took too long; abort the association */
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1848 
1849 void
1850 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1851     struct sctp_nets *net)
1852 {
1853 	uint32_t to_ticks;
1854 	struct sctp_timer *tmr;
1855 
1856 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1857 		return;
1858 
1859 	tmr = NULL;
1860 	if (stcb) {
1861 		SCTP_TCB_LOCK_ASSERT(stcb);
1862 	}
1863 	switch (t_type) {
1864 	case SCTP_TIMER_TYPE_ZERO_COPY:
1865 		tmr = &inp->sctp_ep.zero_copy_timer;
1866 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1867 		break;
1868 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1869 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1870 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1871 		break;
1872 	case SCTP_TIMER_TYPE_ADDR_WQ:
1873 		/* Only 1 tick away :-) */
1874 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1875 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1876 		break;
1877 	case SCTP_TIMER_TYPE_SEND:
1878 		/* Here we use the RTO timer */
1879 		{
1880 			int rto_val;
1881 
1882 			if ((stcb == NULL) || (net == NULL)) {
1883 				return;
1884 			}
1885 			tmr = &net->rxt_timer;
1886 			if (net->RTO == 0) {
1887 				rto_val = stcb->asoc.initial_rto;
1888 			} else {
1889 				rto_val = net->RTO;
1890 			}
1891 			to_ticks = MSEC_TO_TICKS(rto_val);
1892 		}
1893 		break;
1894 	case SCTP_TIMER_TYPE_INIT:
1895 		/*
1896 		 * Here we use the INIT timer default usually about 1
1897 		 * minute.
1898 		 */
1899 		if ((stcb == NULL) || (net == NULL)) {
1900 			return;
1901 		}
1902 		tmr = &net->rxt_timer;
1903 		if (net->RTO == 0) {
1904 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1905 		} else {
1906 			to_ticks = MSEC_TO_TICKS(net->RTO);
1907 		}
1908 		break;
1909 	case SCTP_TIMER_TYPE_RECV:
1910 		/*
1911 		 * Here we use the Delayed-Ack timer value from the inp
1912 		 * ususually about 200ms.
1913 		 */
1914 		if (stcb == NULL) {
1915 			return;
1916 		}
1917 		tmr = &stcb->asoc.dack_timer;
1918 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1919 		break;
1920 	case SCTP_TIMER_TYPE_SHUTDOWN:
1921 		/* Here we use the RTO of the destination. */
1922 		if ((stcb == NULL) || (net == NULL)) {
1923 			return;
1924 		}
1925 		if (net->RTO == 0) {
1926 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1927 		} else {
1928 			to_ticks = MSEC_TO_TICKS(net->RTO);
1929 		}
1930 		tmr = &net->rxt_timer;
1931 		break;
1932 	case SCTP_TIMER_TYPE_HEARTBEAT:
1933 		/*
1934 		 * the net is used here so that we can add in the RTO. Even
1935 		 * though we use a different timer. We also add the HB timer
1936 		 * PLUS a random jitter.
1937 		 */
1938 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1939 			return;
1940 		} else {
1941 			uint32_t rndval;
1942 			uint32_t jitter;
1943 
1944 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1945 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1946 				return;
1947 			}
1948 			if (net->RTO == 0) {
1949 				to_ticks = stcb->asoc.initial_rto;
1950 			} else {
1951 				to_ticks = net->RTO;
1952 			}
1953 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1954 			jitter = rndval % to_ticks;
1955 			if (jitter >= (to_ticks >> 1)) {
1956 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1957 			} else {
1958 				to_ticks = to_ticks - jitter;
1959 			}
1960 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1961 			    !(net->dest_state & SCTP_ADDR_PF)) {
1962 				to_ticks += net->heart_beat_delay;
1963 			}
1964 			/*
1965 			 * Now we must convert the to_ticks that are now in
1966 			 * ms to ticks.
1967 			 */
1968 			to_ticks = MSEC_TO_TICKS(to_ticks);
1969 			tmr = &net->hb_timer;
1970 		}
1971 		break;
1972 	case SCTP_TIMER_TYPE_COOKIE:
1973 		/*
1974 		 * Here we can use the RTO timer from the network since one
1975 		 * RTT was compelete. If a retran happened then we will be
1976 		 * using the RTO initial value.
1977 		 */
1978 		if ((stcb == NULL) || (net == NULL)) {
1979 			return;
1980 		}
1981 		if (net->RTO == 0) {
1982 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1983 		} else {
1984 			to_ticks = MSEC_TO_TICKS(net->RTO);
1985 		}
1986 		tmr = &net->rxt_timer;
1987 		break;
1988 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1989 		/*
1990 		 * nothing needed but the endpoint here ususually about 60
1991 		 * minutes.
1992 		 */
1993 		if (inp == NULL) {
1994 			return;
1995 		}
1996 		tmr = &inp->sctp_ep.signature_change;
1997 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1998 		break;
1999 	case SCTP_TIMER_TYPE_ASOCKILL:
2000 		if (stcb == NULL) {
2001 			return;
2002 		}
2003 		tmr = &stcb->asoc.strreset_timer;
2004 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2005 		break;
2006 	case SCTP_TIMER_TYPE_INPKILL:
2007 		/*
2008 		 * The inp is setup to die. We re-use the signature_chage
2009 		 * timer since that has stopped and we are in the GONE
2010 		 * state.
2011 		 */
2012 		if (inp == NULL) {
2013 			return;
2014 		}
2015 		tmr = &inp->sctp_ep.signature_change;
2016 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2017 		break;
2018 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2019 		/*
2020 		 * Here we use the value found in the EP for PMTU ususually
2021 		 * about 10 minutes.
2022 		 */
2023 		if ((stcb == NULL) || (inp == NULL)) {
2024 			return;
2025 		}
2026 		if (net == NULL) {
2027 			return;
2028 		}
2029 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2030 			return;
2031 		}
2032 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2033 		tmr = &net->pmtu_timer;
2034 		break;
2035 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2036 		/* Here we use the RTO of the destination */
2037 		if ((stcb == NULL) || (net == NULL)) {
2038 			return;
2039 		}
2040 		if (net->RTO == 0) {
2041 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2042 		} else {
2043 			to_ticks = MSEC_TO_TICKS(net->RTO);
2044 		}
2045 		tmr = &net->rxt_timer;
2046 		break;
2047 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2048 		/*
2049 		 * Here we use the endpoints shutdown guard timer usually
2050 		 * about 3 minutes.
2051 		 */
2052 		if ((inp == NULL) || (stcb == NULL)) {
2053 			return;
2054 		}
2055 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2056 		tmr = &stcb->asoc.shut_guard_timer;
2057 		break;
2058 	case SCTP_TIMER_TYPE_STRRESET:
2059 		/*
2060 		 * Here the timer comes from the stcb but its value is from
2061 		 * the net's RTO.
2062 		 */
2063 		if ((stcb == NULL) || (net == NULL)) {
2064 			return;
2065 		}
2066 		if (net->RTO == 0) {
2067 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2068 		} else {
2069 			to_ticks = MSEC_TO_TICKS(net->RTO);
2070 		}
2071 		tmr = &stcb->asoc.strreset_timer;
2072 		break;
2073 	case SCTP_TIMER_TYPE_ASCONF:
2074 		/*
2075 		 * Here the timer comes from the stcb but its value is from
2076 		 * the net's RTO.
2077 		 */
2078 		if ((stcb == NULL) || (net == NULL)) {
2079 			return;
2080 		}
2081 		if (net->RTO == 0) {
2082 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2083 		} else {
2084 			to_ticks = MSEC_TO_TICKS(net->RTO);
2085 		}
2086 		tmr = &stcb->asoc.asconf_timer;
2087 		break;
2088 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2089 		if ((stcb == NULL) || (net != NULL)) {
2090 			return;
2091 		}
2092 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2093 		tmr = &stcb->asoc.delete_prim_timer;
2094 		break;
2095 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2096 		if (stcb == NULL) {
2097 			return;
2098 		}
2099 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2100 			/*
2101 			 * Really an error since stcb is NOT set to
2102 			 * autoclose
2103 			 */
2104 			return;
2105 		}
2106 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2107 		tmr = &stcb->asoc.autoclose_timer;
2108 		break;
2109 	default:
2110 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2111 		    __FUNCTION__, t_type);
2112 		return;
2113 		break;
2114 	}
2115 	if ((to_ticks <= 0) || (tmr == NULL)) {
2116 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2117 		    __FUNCTION__, t_type, to_ticks, tmr);
2118 		return;
2119 	}
2120 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2121 		/*
2122 		 * we do NOT allow you to have it already running. if it is
2123 		 * we leave the current one up unchanged
2124 		 */
2125 		return;
2126 	}
2127 	/* At this point we can proceed */
2128 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2129 		stcb->asoc.num_send_timers_up++;
2130 	}
2131 	tmr->stopped_from = 0;
2132 	tmr->type = t_type;
2133 	tmr->ep = (void *)inp;
2134 	tmr->tcb = (void *)stcb;
2135 	tmr->net = (void *)net;
2136 	tmr->self = (void *)tmr;
2137 	tmr->vnet = (void *)curvnet;
2138 	tmr->ticks = sctp_get_tick_count();
2139 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2140 	return;
2141 }
2142 
2143 void
2144 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2145     struct sctp_nets *net, uint32_t from)
2146 {
2147 	struct sctp_timer *tmr;
2148 
2149 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2150 	    (inp == NULL))
2151 		return;
2152 
2153 	tmr = NULL;
2154 	if (stcb) {
2155 		SCTP_TCB_LOCK_ASSERT(stcb);
2156 	}
2157 	switch (t_type) {
2158 	case SCTP_TIMER_TYPE_ZERO_COPY:
2159 		tmr = &inp->sctp_ep.zero_copy_timer;
2160 		break;
2161 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2162 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2163 		break;
2164 	case SCTP_TIMER_TYPE_ADDR_WQ:
2165 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2166 		break;
2167 	case SCTP_TIMER_TYPE_SEND:
2168 		if ((stcb == NULL) || (net == NULL)) {
2169 			return;
2170 		}
2171 		tmr = &net->rxt_timer;
2172 		break;
2173 	case SCTP_TIMER_TYPE_INIT:
2174 		if ((stcb == NULL) || (net == NULL)) {
2175 			return;
2176 		}
2177 		tmr = &net->rxt_timer;
2178 		break;
2179 	case SCTP_TIMER_TYPE_RECV:
2180 		if (stcb == NULL) {
2181 			return;
2182 		}
2183 		tmr = &stcb->asoc.dack_timer;
2184 		break;
2185 	case SCTP_TIMER_TYPE_SHUTDOWN:
2186 		if ((stcb == NULL) || (net == NULL)) {
2187 			return;
2188 		}
2189 		tmr = &net->rxt_timer;
2190 		break;
2191 	case SCTP_TIMER_TYPE_HEARTBEAT:
2192 		if ((stcb == NULL) || (net == NULL)) {
2193 			return;
2194 		}
2195 		tmr = &net->hb_timer;
2196 		break;
2197 	case SCTP_TIMER_TYPE_COOKIE:
2198 		if ((stcb == NULL) || (net == NULL)) {
2199 			return;
2200 		}
2201 		tmr = &net->rxt_timer;
2202 		break;
2203 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2204 		/* nothing needed but the endpoint here */
2205 		tmr = &inp->sctp_ep.signature_change;
2206 		/*
2207 		 * We re-use the newcookie timer for the INP kill timer. We
2208 		 * must assure that we do not kill it by accident.
2209 		 */
2210 		break;
2211 	case SCTP_TIMER_TYPE_ASOCKILL:
2212 		/*
2213 		 * Stop the asoc kill timer.
2214 		 */
2215 		if (stcb == NULL) {
2216 			return;
2217 		}
2218 		tmr = &stcb->asoc.strreset_timer;
2219 		break;
2220 
2221 	case SCTP_TIMER_TYPE_INPKILL:
2222 		/*
2223 		 * The inp is setup to die. We re-use the signature_chage
2224 		 * timer since that has stopped and we are in the GONE
2225 		 * state.
2226 		 */
2227 		tmr = &inp->sctp_ep.signature_change;
2228 		break;
2229 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2230 		if ((stcb == NULL) || (net == NULL)) {
2231 			return;
2232 		}
2233 		tmr = &net->pmtu_timer;
2234 		break;
2235 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2236 		if ((stcb == NULL) || (net == NULL)) {
2237 			return;
2238 		}
2239 		tmr = &net->rxt_timer;
2240 		break;
2241 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2242 		if (stcb == NULL) {
2243 			return;
2244 		}
2245 		tmr = &stcb->asoc.shut_guard_timer;
2246 		break;
2247 	case SCTP_TIMER_TYPE_STRRESET:
2248 		if (stcb == NULL) {
2249 			return;
2250 		}
2251 		tmr = &stcb->asoc.strreset_timer;
2252 		break;
2253 	case SCTP_TIMER_TYPE_ASCONF:
2254 		if (stcb == NULL) {
2255 			return;
2256 		}
2257 		tmr = &stcb->asoc.asconf_timer;
2258 		break;
2259 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2260 		if (stcb == NULL) {
2261 			return;
2262 		}
2263 		tmr = &stcb->asoc.delete_prim_timer;
2264 		break;
2265 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2266 		if (stcb == NULL) {
2267 			return;
2268 		}
2269 		tmr = &stcb->asoc.autoclose_timer;
2270 		break;
2271 	default:
2272 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2273 		    __FUNCTION__, t_type);
2274 		break;
2275 	}
2276 	if (tmr == NULL) {
2277 		return;
2278 	}
2279 	if ((tmr->type != t_type) && tmr->type) {
2280 		/*
2281 		 * Ok we have a timer that is under joint use. Cookie timer
2282 		 * per chance with the SEND timer. We therefore are NOT
2283 		 * running the timer that the caller wants stopped.  So just
2284 		 * return.
2285 		 */
2286 		return;
2287 	}
2288 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2289 		stcb->asoc.num_send_timers_up--;
2290 		if (stcb->asoc.num_send_timers_up < 0) {
2291 			stcb->asoc.num_send_timers_up = 0;
2292 		}
2293 	}
2294 	tmr->self = NULL;
2295 	tmr->stopped_from = from;
2296 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2297 	return;
2298 }
2299 
2300 uint32_t
2301 sctp_calculate_len(struct mbuf *m)
2302 {
2303 	uint32_t tlen = 0;
2304 	struct mbuf *at;
2305 
2306 	at = m;
2307 	while (at) {
2308 		tlen += SCTP_BUF_LEN(at);
2309 		at = SCTP_BUF_NEXT(at);
2310 	}
2311 	return (tlen);
2312 }
2313 
2314 void
2315 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2316     struct sctp_association *asoc, uint32_t mtu)
2317 {
2318 	/*
2319 	 * Reset the P-MTU size on this association, this involves changing
2320 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2321 	 * allow the DF flag to be cleared.
2322 	 */
2323 	struct sctp_tmit_chunk *chk;
2324 	unsigned int eff_mtu, ovh;
2325 
2326 	asoc->smallest_mtu = mtu;
2327 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2328 		ovh = SCTP_MIN_OVERHEAD;
2329 	} else {
2330 		ovh = SCTP_MIN_V4_OVERHEAD;
2331 	}
2332 	eff_mtu = mtu - ovh;
2333 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2334 		if (chk->send_size > eff_mtu) {
2335 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2336 		}
2337 	}
2338 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2339 		if (chk->send_size > eff_mtu) {
2340 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2341 		}
2342 	}
2343 }
2344 
2345 
2346 /*
2347  * given an association and starting time of the current RTT period return
2348  * RTO in number of msecs net should point to the current network
2349  */
2350 
2351 uint32_t
2352 sctp_calculate_rto(struct sctp_tcb *stcb,
2353     struct sctp_association *asoc,
2354     struct sctp_nets *net,
2355     struct timeval *told,
2356     int safe, int rtt_from_sack)
2357 {
2358 	/*-
2359 	 * given an association and the starting time of the current RTT
2360 	 * period (in value1/value2) return RTO in number of msecs.
2361 	 */
2362 	int32_t rtt;		/* RTT in ms */
2363 	uint32_t new_rto;
2364 	int first_measure = 0;
2365 	struct timeval now, then, *old;
2366 
2367 	/* Copy it out for sparc64 */
2368 	if (safe == sctp_align_unsafe_makecopy) {
2369 		old = &then;
2370 		memcpy(&then, told, sizeof(struct timeval));
2371 	} else if (safe == sctp_align_safe_nocopy) {
2372 		old = told;
2373 	} else {
2374 		/* error */
2375 		SCTP_PRINTF("Huh, bad rto calc call\n");
2376 		return (0);
2377 	}
2378 	/************************/
2379 	/* 1. calculate new RTT */
2380 	/************************/
2381 	/* get the current time */
2382 	if (stcb->asoc.use_precise_time) {
2383 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2384 	} else {
2385 		(void)SCTP_GETTIME_TIMEVAL(&now);
2386 	}
2387 	timevalsub(&now, old);
2388 	/* store the current RTT in us */
2389 	net->rtt = (uint64_t) 10000000 *(uint64_t) now.tv_sec +
2390 	         (uint64_t) now.tv_usec;
2391 
2392 	/* computer rtt in ms */
2393 	rtt = net->rtt / 1000;
2394 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2395 		/*
2396 		 * Tell the CC module that a new update has just occurred
2397 		 * from a sack
2398 		 */
2399 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2400 	}
2401 	/*
2402 	 * Do we need to determine the lan? We do this only on sacks i.e.
2403 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2404 	 */
2405 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2406 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2407 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2408 			net->lan_type = SCTP_LAN_INTERNET;
2409 		} else {
2410 			net->lan_type = SCTP_LAN_LOCAL;
2411 		}
2412 	}
2413 	/***************************/
2414 	/* 2. update RTTVAR & SRTT */
2415 	/***************************/
2416 	/*-
2417 	 * Compute the scaled average lastsa and the
2418 	 * scaled variance lastsv as described in van Jacobson
2419 	 * Paper "Congestion Avoidance and Control", Annex A.
2420 	 *
2421 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2422 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2423 	 */
2424 	if (net->RTO_measured) {
2425 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2426 		net->lastsa += rtt;
2427 		if (rtt < 0) {
2428 			rtt = -rtt;
2429 		}
2430 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2431 		net->lastsv += rtt;
2432 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2433 			rto_logging(net, SCTP_LOG_RTTVAR);
2434 		}
2435 	} else {
2436 		/* First RTO measurment */
2437 		net->RTO_measured = 1;
2438 		first_measure = 1;
2439 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2440 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2441 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2442 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2443 		}
2444 	}
2445 	if (net->lastsv == 0) {
2446 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2447 	}
2448 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2449 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2450 	    (stcb->asoc.sat_network_lockout == 0)) {
2451 		stcb->asoc.sat_network = 1;
2452 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2453 		stcb->asoc.sat_network = 0;
2454 		stcb->asoc.sat_network_lockout = 1;
2455 	}
2456 	/* bound it, per C6/C7 in Section 5.3.1 */
2457 	if (new_rto < stcb->asoc.minrto) {
2458 		new_rto = stcb->asoc.minrto;
2459 	}
2460 	if (new_rto > stcb->asoc.maxrto) {
2461 		new_rto = stcb->asoc.maxrto;
2462 	}
2463 	/* we are now returning the RTO */
2464 	return (new_rto);
2465 }
2466 
2467 /*
2468  * return a pointer to a contiguous piece of data from the given mbuf chain
2469  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2470  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2471  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2472  */
2473 caddr_t
2474 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2475 {
2476 	uint32_t count;
2477 	uint8_t *ptr;
2478 
2479 	ptr = in_ptr;
2480 	if ((off < 0) || (len <= 0))
2481 		return (NULL);
2482 
2483 	/* find the desired start location */
2484 	while ((m != NULL) && (off > 0)) {
2485 		if (off < SCTP_BUF_LEN(m))
2486 			break;
2487 		off -= SCTP_BUF_LEN(m);
2488 		m = SCTP_BUF_NEXT(m);
2489 	}
2490 	if (m == NULL)
2491 		return (NULL);
2492 
2493 	/* is the current mbuf large enough (eg. contiguous)? */
2494 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2495 		return (mtod(m, caddr_t)+off);
2496 	} else {
2497 		/* else, it spans more than one mbuf, so save a temp copy... */
2498 		while ((m != NULL) && (len > 0)) {
2499 			count = min(SCTP_BUF_LEN(m) - off, len);
2500 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2501 			len -= count;
2502 			ptr += count;
2503 			off = 0;
2504 			m = SCTP_BUF_NEXT(m);
2505 		}
2506 		if ((m == NULL) && (len > 0))
2507 			return (NULL);
2508 		else
2509 			return ((caddr_t)in_ptr);
2510 	}
2511 }
2512 
2513 
2514 
2515 struct sctp_paramhdr *
2516 sctp_get_next_param(struct mbuf *m,
2517     int offset,
2518     struct sctp_paramhdr *pull,
2519     int pull_limit)
2520 {
2521 	/* This just provides a typed signature to Peter's Pull routine */
2522 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2523 	    (uint8_t *) pull));
2524 }
2525 
2526 
2527 int
2528 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2529 {
2530 	/*
2531 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2532 	 * padlen is > 3 this routine will fail.
2533 	 */
2534 	uint8_t *dp;
2535 	int i;
2536 
2537 	if (padlen > 3) {
2538 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2539 		return (ENOBUFS);
2540 	}
2541 	if (padlen <= M_TRAILINGSPACE(m)) {
2542 		/*
2543 		 * The easy way. We hope the majority of the time we hit
2544 		 * here :)
2545 		 */
2546 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2547 		SCTP_BUF_LEN(m) += padlen;
2548 	} else {
2549 		/* Hard way we must grow the mbuf */
2550 		struct mbuf *tmp;
2551 
2552 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2553 		if (tmp == NULL) {
2554 			/* Out of space GAK! we are in big trouble. */
2555 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2556 			return (ENOBUFS);
2557 		}
2558 		/* setup and insert in middle */
2559 		SCTP_BUF_LEN(tmp) = padlen;
2560 		SCTP_BUF_NEXT(tmp) = NULL;
2561 		SCTP_BUF_NEXT(m) = tmp;
2562 		dp = mtod(tmp, uint8_t *);
2563 	}
2564 	/* zero out the pad */
2565 	for (i = 0; i < padlen; i++) {
2566 		*dp = 0;
2567 		dp++;
2568 	}
2569 	return (0);
2570 }
2571 
2572 int
2573 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2574 {
2575 	/* find the last mbuf in chain and pad it */
2576 	struct mbuf *m_at;
2577 
2578 	m_at = m;
2579 	if (last_mbuf) {
2580 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2581 	} else {
2582 		while (m_at) {
2583 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2584 				return (sctp_add_pad_tombuf(m_at, padval));
2585 			}
2586 			m_at = SCTP_BUF_NEXT(m_at);
2587 		}
2588 	}
2589 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2590 	return (EFAULT);
2591 }
2592 
2593 static void
2594 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2595     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2596 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2597     SCTP_UNUSED
2598 #endif
2599 )
2600 {
2601 	struct mbuf *m_notify;
2602 	struct sctp_assoc_change *sac;
2603 	struct sctp_queued_to_read *control;
2604 	size_t notif_len, abort_len;
2605 	unsigned int i;
2606 
2607 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2608 	struct socket *so;
2609 
2610 #endif
2611 
2612 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2613 		notif_len = sizeof(struct sctp_assoc_change);
2614 		if (abort != NULL) {
2615 			abort_len = htons(abort->ch.chunk_length);
2616 		} else {
2617 			abort_len = 0;
2618 		}
2619 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2620 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2621 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2622 			notif_len += abort_len;
2623 		}
2624 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
2625 		if (m_notify == NULL) {
2626 			/* Retry with smaller value. */
2627 			notif_len = sizeof(struct sctp_assoc_change);
2628 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
2629 			if (m_notify == NULL) {
2630 				goto set_error;
2631 			}
2632 		}
2633 		SCTP_BUF_NEXT(m_notify) = NULL;
2634 		sac = mtod(m_notify, struct sctp_assoc_change *);
2635 		sac->sac_type = SCTP_ASSOC_CHANGE;
2636 		sac->sac_flags = 0;
2637 		sac->sac_length = sizeof(struct sctp_assoc_change);
2638 		sac->sac_state = state;
2639 		sac->sac_error = error;
2640 		/* XXX verify these stream counts */
2641 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2642 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2643 		sac->sac_assoc_id = sctp_get_associd(stcb);
2644 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2645 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2646 				i = 0;
2647 				if (stcb->asoc.peer_supports_prsctp) {
2648 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2649 				}
2650 				if (stcb->asoc.peer_supports_auth) {
2651 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2652 				}
2653 				if (stcb->asoc.peer_supports_asconf) {
2654 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2655 				}
2656 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2657 				if (stcb->asoc.peer_supports_strreset) {
2658 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2659 				}
2660 				sac->sac_length += i;
2661 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2662 				memcpy(sac->sac_info, abort, abort_len);
2663 				sac->sac_length += abort_len;
2664 			}
2665 		}
2666 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2667 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2668 		    0, 0, stcb->asoc.context, 0, 0, 0,
2669 		    m_notify);
2670 		if (control != NULL) {
2671 			control->length = SCTP_BUF_LEN(m_notify);
2672 			/* not that we need this */
2673 			control->tail_mbuf = m_notify;
2674 			control->spec_flags = M_NOTIFICATION;
2675 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2676 			    control,
2677 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2678 			    so_locked);
2679 		} else {
2680 			sctp_m_freem(m_notify);
2681 		}
2682 	}
2683 	/*
2684 	 * For 1-to-1 style sockets, we send up and error when an ABORT
2685 	 * comes in.
2686 	 */
2687 set_error:
2688 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2689 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2690 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2691 		if (from_peer) {
2692 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2693 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2694 				stcb->sctp_socket->so_error = ECONNREFUSED;
2695 			} else {
2696 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2697 				stcb->sctp_socket->so_error = ECONNRESET;
2698 			}
2699 		} else {
2700 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2701 			stcb->sctp_socket->so_error = ECONNABORTED;
2702 		}
2703 	}
2704 	/* Wake ANY sleepers */
2705 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2706 	so = SCTP_INP_SO(stcb->sctp_ep);
2707 	if (!so_locked) {
2708 		atomic_add_int(&stcb->asoc.refcnt, 1);
2709 		SCTP_TCB_UNLOCK(stcb);
2710 		SCTP_SOCKET_LOCK(so, 1);
2711 		SCTP_TCB_LOCK(stcb);
2712 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2713 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2714 			SCTP_SOCKET_UNLOCK(so, 1);
2715 			return;
2716 		}
2717 	}
2718 #endif
2719 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2720 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2721 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2722 		socantrcvmore(stcb->sctp_socket);
2723 	}
2724 	sorwakeup(stcb->sctp_socket);
2725 	sowwakeup(stcb->sctp_socket);
2726 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2727 	if (!so_locked) {
2728 		SCTP_SOCKET_UNLOCK(so, 1);
2729 	}
2730 #endif
2731 }
2732 
2733 static void
2734 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2735     struct sockaddr *sa, uint32_t error)
2736 {
2737 	struct mbuf *m_notify;
2738 	struct sctp_paddr_change *spc;
2739 	struct sctp_queued_to_read *control;
2740 
2741 	if ((stcb == NULL) ||
2742 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2743 		/* event not enabled */
2744 		return;
2745 	}
2746 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2747 	if (m_notify == NULL)
2748 		return;
2749 	SCTP_BUF_LEN(m_notify) = 0;
2750 	spc = mtod(m_notify, struct sctp_paddr_change *);
2751 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2752 	spc->spc_flags = 0;
2753 	spc->spc_length = sizeof(struct sctp_paddr_change);
2754 	switch (sa->sa_family) {
2755 #ifdef INET
2756 	case AF_INET:
2757 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2758 		break;
2759 #endif
2760 #ifdef INET6
2761 	case AF_INET6:
2762 		{
2763 			struct sockaddr_in6 *sin6;
2764 
2765 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2766 
2767 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2768 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2769 				if (sin6->sin6_scope_id == 0) {
2770 					/* recover scope_id for user */
2771 					(void)sa6_recoverscope(sin6);
2772 				} else {
2773 					/* clear embedded scope_id for user */
2774 					in6_clearscope(&sin6->sin6_addr);
2775 				}
2776 			}
2777 			break;
2778 		}
2779 #endif
2780 	default:
2781 		/* TSNH */
2782 		break;
2783 	}
2784 	spc->spc_state = state;
2785 	spc->spc_error = error;
2786 	spc->spc_assoc_id = sctp_get_associd(stcb);
2787 
2788 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2789 	SCTP_BUF_NEXT(m_notify) = NULL;
2790 
2791 	/* append to socket */
2792 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2793 	    0, 0, stcb->asoc.context, 0, 0, 0,
2794 	    m_notify);
2795 	if (control == NULL) {
2796 		/* no memory */
2797 		sctp_m_freem(m_notify);
2798 		return;
2799 	}
2800 	control->length = SCTP_BUF_LEN(m_notify);
2801 	control->spec_flags = M_NOTIFICATION;
2802 	/* not that we need this */
2803 	control->tail_mbuf = m_notify;
2804 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2805 	    control,
2806 	    &stcb->sctp_socket->so_rcv, 1,
2807 	    SCTP_READ_LOCK_NOT_HELD,
2808 	    SCTP_SO_NOT_LOCKED);
2809 }
2810 
2811 
2812 static void
2813 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2814     struct sctp_tmit_chunk *chk, int so_locked
2815 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2816     SCTP_UNUSED
2817 #endif
2818 )
2819 {
2820 	struct mbuf *m_notify;
2821 	struct sctp_send_failed *ssf;
2822 	struct sctp_send_failed_event *ssfe;
2823 	struct sctp_queued_to_read *control;
2824 	int length;
2825 
2826 	if ((stcb == NULL) ||
2827 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2828 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2829 		/* event not enabled */
2830 		return;
2831 	}
2832 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2833 		length = sizeof(struct sctp_send_failed_event);
2834 	} else {
2835 		length = sizeof(struct sctp_send_failed);
2836 	}
2837 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_DONTWAIT, 1, MT_DATA);
2838 	if (m_notify == NULL)
2839 		/* no space left */
2840 		return;
2841 	length += chk->send_size;
2842 	length -= sizeof(struct sctp_data_chunk);
2843 	SCTP_BUF_LEN(m_notify) = 0;
2844 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2845 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2846 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2847 		if (sent) {
2848 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2849 		} else {
2850 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2851 		}
2852 		ssfe->ssfe_length = length;
2853 		ssfe->ssfe_error = error;
2854 		/* not exactly what the user sent in, but should be close :) */
2855 		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
2856 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2857 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2858 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2859 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2860 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2861 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2862 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
2863 	} else {
2864 		ssf = mtod(m_notify, struct sctp_send_failed *);
2865 		ssf->ssf_type = SCTP_SEND_FAILED;
2866 		if (sent) {
2867 			ssf->ssf_flags = SCTP_DATA_SENT;
2868 		} else {
2869 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2870 		}
2871 		ssf->ssf_length = length;
2872 		ssf->ssf_error = error;
2873 		/* not exactly what the user sent in, but should be close :) */
2874 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
2875 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2876 		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2877 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2878 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2879 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2880 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2881 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2882 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2883 	}
2884 	if (chk->data) {
2885 		/*
2886 		 * trim off the sctp chunk header(it should be there)
2887 		 */
2888 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
2889 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
2890 			sctp_mbuf_crush(chk->data);
2891 			chk->send_size -= sizeof(struct sctp_data_chunk);
2892 		}
2893 	}
2894 	SCTP_BUF_NEXT(m_notify) = chk->data;
2895 	/* Steal off the mbuf */
2896 	chk->data = NULL;
2897 	/*
2898 	 * For this case, we check the actual socket buffer, since the assoc
2899 	 * is going away we don't want to overfill the socket buffer for a
2900 	 * non-reader
2901 	 */
2902 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2903 		sctp_m_freem(m_notify);
2904 		return;
2905 	}
2906 	/* append to socket */
2907 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2908 	    0, 0, stcb->asoc.context, 0, 0, 0,
2909 	    m_notify);
2910 	if (control == NULL) {
2911 		/* no memory */
2912 		sctp_m_freem(m_notify);
2913 		return;
2914 	}
2915 	control->spec_flags = M_NOTIFICATION;
2916 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2917 	    control,
2918 	    &stcb->sctp_socket->so_rcv, 1,
2919 	    SCTP_READ_LOCK_NOT_HELD,
2920 	    so_locked);
2921 }
2922 
2923 
/*
 * Queue an SCTP_SEND_FAILED notification for a message that never made it
 * off the stream output queue (unsent data).  The pending payload in
 * sp->data is stolen and chained onto the notification mbuf so the user
 * gets the original message back alongside the event header.
 *
 * Which header format is used (sctp_send_failed_event vs. the older
 * sctp_send_failed) depends on which of the two subscription flags the
 * application enabled.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* The mbuf only needs to hold the fixed-size event header... */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	/*
	 * ...but the length reported inside the event covers header plus
	 * the unsent payload that is appended below via SCTP_BUF_NEXT.
	 */
	length += sp->length;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		ssfe->ssfe_type = SCTP_SEND_FAILED;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			/* part of the message was already taken for sending */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = sp->strseq;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no room: drop the whole notification (incl. stolen data) */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3023 
3024 
3025 
3026 static void
3027 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3028 {
3029 	struct mbuf *m_notify;
3030 	struct sctp_adaptation_event *sai;
3031 	struct sctp_queued_to_read *control;
3032 
3033 	if ((stcb == NULL) ||
3034 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3035 		/* event not enabled */
3036 		return;
3037 	}
3038 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3039 	if (m_notify == NULL)
3040 		/* no space left */
3041 		return;
3042 	SCTP_BUF_LEN(m_notify) = 0;
3043 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3044 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3045 	sai->sai_flags = 0;
3046 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3047 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3048 	sai->sai_assoc_id = sctp_get_associd(stcb);
3049 
3050 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3051 	SCTP_BUF_NEXT(m_notify) = NULL;
3052 
3053 	/* append to socket */
3054 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3055 	    0, 0, stcb->asoc.context, 0, 0, 0,
3056 	    m_notify);
3057 	if (control == NULL) {
3058 		/* no memory */
3059 		sctp_m_freem(m_notify);
3060 		return;
3061 	}
3062 	control->length = SCTP_BUF_LEN(m_notify);
3063 	control->spec_flags = M_NOTIFICATION;
3064 	/* not that we need this */
3065 	control->tail_mbuf = m_notify;
3066 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3067 	    control,
3068 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3069 }
3070 
3071 /* This always must be called with the read-queue LOCKED in the INP */
/*
 * Queue an SCTP_PARTIAL_DELIVERY_EVENT on the endpoint's read queue.
 * `val` packs the stream id in the upper 16 bits and the sequence number
 * in the lower 16 bits.  Unlike the other notifiers in this file, this
 * does NOT go through sctp_add_to_readq(): the entry is hand-inserted
 * right after the in-progress partial-delivery entry (control_pdapi) so
 * the reader sees the event in the correct position, and the socket
 * buffer accounting (sctp_sballoc) is done manually here.  Per the
 * comment above, the INP read-queue lock must already be held.
 */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read the event anyway */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* unpack stream id / sequence number from the caller's packed val */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	/*
	 * length is reset to 0 here (overriding the assignment above) and
	 * then accumulated via atomic_add_int below, mirroring what
	 * sctp_add_to_readq would do.
	 */
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	/* manual socket-buffer accounting since we bypass sctp_add_to_readq */
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	if (stcb->asoc.control_pdapi)
		/* place directly after the partial-delivery entry in progress */
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Apple/lock-testing builds must take the socket lock before
		 * the wakeup; hold a refcount across the TCB unlock/relock
		 * so the assoc cannot be freed underneath us.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
3165 
/*
 * Handle the ULP side of a completed SHUTDOWN: for one-to-one style (and
 * connected one-to-many) sockets, mark the socket as unable to send and
 * wake any writer; then, if subscribed, queue an SCTP_SHUTDOWN_EVENT
 * notification on the read queue.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Take the socket lock for socantsendmore(); hold a refcount
		 * across the TCB unlock/relock so the assoc stays alive.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3233 
3234 static void
3235 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3236     int so_locked
3237 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3238     SCTP_UNUSED
3239 #endif
3240 )
3241 {
3242 	struct mbuf *m_notify;
3243 	struct sctp_sender_dry_event *event;
3244 	struct sctp_queued_to_read *control;
3245 
3246 	if ((stcb == NULL) ||
3247 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3248 		/* event not enabled */
3249 		return;
3250 	}
3251 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3252 	if (m_notify == NULL) {
3253 		/* no space left */
3254 		return;
3255 	}
3256 	SCTP_BUF_LEN(m_notify) = 0;
3257 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3258 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3259 	event->sender_dry_flags = 0;
3260 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3261 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3262 
3263 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3264 	SCTP_BUF_NEXT(m_notify) = NULL;
3265 
3266 	/* append to socket */
3267 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3268 	    0, 0, stcb->asoc.context, 0, 0, 0,
3269 	    m_notify);
3270 	if (control == NULL) {
3271 		/* no memory */
3272 		sctp_m_freem(m_notify);
3273 		return;
3274 	}
3275 	control->length = SCTP_BUF_LEN(m_notify);
3276 	control->spec_flags = M_NOTIFICATION;
3277 	/* not that we need this */
3278 	control->tail_mbuf = m_notify;
3279 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3280 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3281 }
3282 
3283 
/*
 * Queue an SCTP_STREAM_CHANGE_EVENT notification reporting the new
 * number of inbound/outbound streams after a stream add/change.  If the
 * change was the result of a request the local peer initiated via the
 * remote side (asoc.peer_req_out set) and `flag` is non-zero, the event
 * is suppressed — the local user asked for it and need not be told.
 */
void
sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_change_event *stradd;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
		/* event not enabled */
		return;
	}
	if ((stcb->asoc.peer_req_out) && flag) {
		/* Peer made the request, don't tell the local user */
		stcb->asoc.peer_req_out = 0;
		return;
	}
	stcb->asoc.peer_req_out = 0;
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	len = sizeof(struct sctp_stream_change_event);
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	stradd = mtod(m_notify, struct sctp_stream_change_event *);
	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
	stradd->strchange_flags = flag;
	stradd->strchange_length = len;
	stradd->strchange_assoc_id = sctp_get_associd(stcb);
	stradd->strchange_instrms = numberin;
	stradd->strchange_outstrms = numberout;
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3345 
3346 void
3347 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3348 {
3349 	struct mbuf *m_notify;
3350 	struct sctp_queued_to_read *control;
3351 	struct sctp_assoc_reset_event *strasoc;
3352 	int len;
3353 
3354 	if ((stcb == NULL) ||
3355 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3356 		/* event not enabled */
3357 		return;
3358 	}
3359 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3360 	if (m_notify == NULL)
3361 		/* no space left */
3362 		return;
3363 	SCTP_BUF_LEN(m_notify) = 0;
3364 	len = sizeof(struct sctp_assoc_reset_event);
3365 	if (len > M_TRAILINGSPACE(m_notify)) {
3366 		/* never enough room */
3367 		sctp_m_freem(m_notify);
3368 		return;
3369 	}
3370 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3371 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3372 	strasoc->assocreset_flags = flag;
3373 	strasoc->assocreset_length = len;
3374 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3375 	strasoc->assocreset_local_tsn = sending_tsn;
3376 	strasoc->assocreset_remote_tsn = recv_tsn;
3377 	SCTP_BUF_LEN(m_notify) = len;
3378 	SCTP_BUF_NEXT(m_notify) = NULL;
3379 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3380 		/* no space */
3381 		sctp_m_freem(m_notify);
3382 		return;
3383 	}
3384 	/* append to socket */
3385 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3386 	    0, 0, stcb->asoc.context, 0, 0, 0,
3387 	    m_notify);
3388 	if (control == NULL) {
3389 		/* no memory */
3390 		sctp_m_freem(m_notify);
3391 		return;
3392 	}
3393 	control->spec_flags = M_NOTIFICATION;
3394 	control->length = SCTP_BUF_LEN(m_notify);
3395 	/* not that we need this */
3396 	control->tail_mbuf = m_notify;
3397 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3398 	    control,
3399 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3400 }
3401 
3402 
3403 
/*
 * Queue an SCTP_STREAM_RESET_EVENT notification, listing the affected
 * stream numbers (if any).  `flag` carries the direction/result bits
 * (SCTP_STREAM_RESET_INCOMING/OUTGOING_SSN, _FAILED, _DENIED) supplied
 * by the caller in sctp_ulp_notify().
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	/* header plus one uint16_t stream number per entry */
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	strreset->strreset_flags = flag;
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		for (i = 0; i < number_entries; i++) {
			/*
			 * NOTE(review): entries are byte-swapped with ntohs()
			 * here, i.e. `list` is assumed to still be in network
			 * byte order at this point — confirm against callers.
			 */
			strreset->strreset_stream_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3465 
3466 
3467 static void
3468 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3469 {
3470 	struct mbuf *m_notify;
3471 	struct sctp_remote_error *sre;
3472 	struct sctp_queued_to_read *control;
3473 	size_t notif_len, chunk_len;
3474 
3475 	if ((stcb == NULL) ||
3476 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3477 		return;
3478 	}
3479 	if (chunk != NULL) {
3480 		chunk_len = htons(chunk->ch.chunk_length);
3481 	} else {
3482 		chunk_len = 0;
3483 	}
3484 	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
3485 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
3486 	if (m_notify == NULL) {
3487 		/* Retry with smaller value. */
3488 		notif_len = sizeof(struct sctp_remote_error);
3489 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
3490 		if (m_notify == NULL) {
3491 			return;
3492 		}
3493 	}
3494 	SCTP_BUF_NEXT(m_notify) = NULL;
3495 	sre = mtod(m_notify, struct sctp_remote_error *);
3496 	sre->sre_type = SCTP_REMOTE_ERROR;
3497 	sre->sre_flags = 0;
3498 	sre->sre_length = sizeof(struct sctp_remote_error);
3499 	sre->sre_error = error;
3500 	sre->sre_assoc_id = sctp_get_associd(stcb);
3501 	if (notif_len > sizeof(struct sctp_remote_error)) {
3502 		memcpy(sre->sre_data, chunk, chunk_len);
3503 		sre->sre_length += chunk_len;
3504 	}
3505 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3506 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3507 	    0, 0, stcb->asoc.context, 0, 0, 0,
3508 	    m_notify);
3509 	if (control != NULL) {
3510 		control->length = SCTP_BUF_LEN(m_notify);
3511 		/* not that we need this */
3512 		control->tail_mbuf = m_notify;
3513 		control->spec_flags = M_NOTIFICATION;
3514 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3515 		    control,
3516 		    &stcb->sctp_socket->so_rcv, 1,
3517 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3518 	} else {
3519 		sctp_m_freem(m_notify);
3520 	}
3521 }
3522 
3523 
/*
 * Central ULP notification dispatcher: translate an internal
 * SCTP_NOTIFY_* code (plus its code-specific `data` payload) into the
 * corresponding user-visible event by calling the matching
 * sctp_notify_*() helper.  Bails out early if the socket is gone, can't
 * receive, or the association is still in a front (handshake) state for
 * interface events.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* reader side already shut down; nothing to deliver */
		return;
	}
	/* (stcb is already known non-NULL here; the check is redundant) */
	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is sent at most once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.peer_supports_auth == 0) {
			/* recurse to report the lack of peer AUTH support */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			/* data is the affected sctp_nets for interface events */
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* data is a stream-queue-pending entry (never sent) */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			/* data points at the packed stream-id/seq value */
			uint32_t val;

			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* aborts during handshake become CANT_STR_ASSOC, else COMM_LOST */
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		/* for stream-reset events, `error` carries the entry count */
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* for AUTH events, data carries the key id as an integer */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}
3703 
/*
 * Report every queued outbound message as failed and free it: walks the
 * sent queue (SENT_DG_FAIL), the send queue (UNSENT_DG_FAIL), and each
 * stream's output queue (SPECIAL_SP_FAIL), notifying the ULP for each
 * chunk/message before releasing its buffers.  Used when the association
 * is being torn down.  Takes the TCB send lock unless the caller already
 * holds it (holds_lock).
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* no user left to notify */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* the notifier may steal chk->data; re-check below */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* send_failed2 steals sp->data on success */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3797 
/*
 * Tell the ULP the association has been aborted: flag the endpoint as
 * WAS_ABORTED for connected one-to-one style sockets, report all
 * outstanding outbound data as failed, and deliver either a remote- or
 * local-abort association-change event depending on `from_peer`.
 * `abort` (may be NULL) is the ABORT chunk to attach to the event.
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* socket gone: nobody to notify */
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 1, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}
3827 
/*
 * Abort an association: notify the ULP (if we have a TCB), send an ABORT
 * packet back to the peer using the peer's verification tag (or 0 when
 * no TCB exists), update abort statistics, and free the association.
 * `m`/`iphlen`/`sh` describe the inbound packet being responded to;
 * `op_err` is an optional operational-error cause chain for the ABORT.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Apple/lock-testing builds need the socket lock for
		 * sctp_free_assoc(); hold a refcount across the TCB
		 * unlock/relock so the assoc survives the window.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* an established assoc going away decrements the gauge */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3871 
3872 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug aid: dump the inbound and outbound TSN tracking logs of an
 * association.  The body is compiled out unless NOSIY_PRINTS is defined.
 * NOTE(review): "NOSIY_PRINTS" looks like a typo of "NOISY_PRINTS" —
 * confirm which symbol build configurations actually define before
 * renaming it.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		/* log is empty and has never wrapped */
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* wrapped ring: entries tsn_in_at..end are the oldest */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* entries 0..tsn_in_at-1 are the newest */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		/* same wrapped-ring ordering as the inbound log above */
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3933 
3934 #endif
3935 
/*
 * Abort an existing association: mark it aborted, notify the ULP
 * (unless the socket is gone), send an ABORT chunk to the peer, and
 * free the association.  Called with the TCB lock held when
 * stcb != NULL.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			/*
			 * Socket already gone and no associations remain:
			 * finish tearing down the PCB.
			 */
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* an established association is going away: drop the gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Take the socket lock before freeing; the refcount pins the TCB
	 * across the window where the TCB lock is dropped.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3996 
/*
 * Handle an "out of the blue" packet — one that matches no known
 * association.  Walks the chunk list to decide whether to respond:
 * some chunk types suppress any reply, a SHUTDOWN-ACK is answered with
 * a SHUTDOWN-COMPLETE, and otherwise an ABORT is sent (subject to the
 * sctp_blackhole sysctl).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		/* socket gone and no associations left: tear down the PCB */
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			/* remembered for the blackhole decision below */
			contains_init_chunk = 1;
			break;
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(m, sh, vrf_id, port);
			return;
		default:
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/*
	 * sctp_blackhole: 0 = always respond with an ABORT here;
	 * 1 = respond unless the packet contained an INIT chunk.
	 */
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
	}
}
4057 
4058 /*
4059  * check the inbound datagram to make sure there is not an abort inside it,
4060  * if there is return 1, else return 0.
4061  */
4062 int
4063 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4064 {
4065 	struct sctp_chunkhdr *ch;
4066 	struct sctp_init_chunk *init_chk, chunk_buf;
4067 	int offset;
4068 	unsigned int chk_length;
4069 
4070 	offset = iphlen + sizeof(struct sctphdr);
4071 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4072 	    (uint8_t *) & chunk_buf);
4073 	while (ch != NULL) {
4074 		chk_length = ntohs(ch->chunk_length);
4075 		if (chk_length < sizeof(*ch)) {
4076 			/* packet is probably corrupt */
4077 			break;
4078 		}
4079 		/* we seem to be ok, is it an abort? */
4080 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4081 			/* yep, tell them */
4082 			return (1);
4083 		}
4084 		if (ch->chunk_type == SCTP_INITIATION) {
4085 			/* need to update the Vtag */
4086 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4087 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4088 			if (init_chk != NULL) {
4089 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4090 			}
4091 		}
4092 		/* Nope, move to the next chunk */
4093 		offset += SCTP_SIZE32(chk_length);
4094 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4095 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4096 	}
4097 	return (0);
4098 }
4099 
4100 /*
4101  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4102  * set (i.e. it's 0) so, create this function to compare link local scopes
4103  */
4104 #ifdef INET6
/*
 * Compare the link-local scopes of two IPv6 addresses, recovering an
 * embedded scope id when sin6_scope_id is unset.  Returns 1 when the
 * scopes match, 0 when they differ or a scope cannot be recovered.
 */
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	struct sockaddr_in6 tmp1, tmp2;

	/* work on copies so the callers' addresses stay untouched */
	tmp1 = *addr1;
	tmp2 = *addr2;

	if ((tmp1.sin6_scope_id == 0) && sa6_recoverscope(&tmp1)) {
		/* can't get scope, so can't match */
		return (0);
	}
	if ((tmp2.sin6_scope_id == 0) && sa6_recoverscope(&tmp2)) {
		/* can't get scope, so can't match */
		return (0);
	}
	return ((tmp1.sin6_scope_id == tmp2.sin6_scope_id) ? 1 : 0);
}
4129 
4130 /*
4131  * returns a sockaddr_in6 with embedded scope recovered and removed
4132  */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* only link-local IPv6 addresses carry embedded scope junk */
	if ((addr->sin6_family != AF_INET6) ||
	    (!IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr))) {
		return (addr);
	}
	if (addr->sin6_scope_id != 0) {
		/* scope id already set: just strip the embedded scope */
		in6_clearscope(&addr->sin6_addr);
		return (addr);
	}
	/* recover the scope id into the caller-supplied storage */
	*store = *addr;
	if (sa6_recoverscope(store) == 0) {
		/* use the recovered scope */
		return (store);
	}
	/* recovery failed; return the original "to" addr */
	return (addr);
}
4153 
4154 #endif
4155 
4156 /*
4157  * are the two addresses the same?  currently a "scopeless" check returns: 1
4158  * if same, 0 if not
4159  */
4160 int
4161 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4162 {
4163 
4164 	/* must be valid */
4165 	if (sa1 == NULL || sa2 == NULL)
4166 		return (0);
4167 
4168 	/* must be the same family */
4169 	if (sa1->sa_family != sa2->sa_family)
4170 		return (0);
4171 
4172 	switch (sa1->sa_family) {
4173 #ifdef INET6
4174 	case AF_INET6:
4175 		{
4176 			/* IPv6 addresses */
4177 			struct sockaddr_in6 *sin6_1, *sin6_2;
4178 
4179 			sin6_1 = (struct sockaddr_in6 *)sa1;
4180 			sin6_2 = (struct sockaddr_in6 *)sa2;
4181 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4182 			    sin6_2));
4183 		}
4184 #endif
4185 #ifdef INET
4186 	case AF_INET:
4187 		{
4188 			/* IPv4 addresses */
4189 			struct sockaddr_in *sin_1, *sin_2;
4190 
4191 			sin_1 = (struct sockaddr_in *)sa1;
4192 			sin_2 = (struct sockaddr_in *)sa2;
4193 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4194 		}
4195 #endif
4196 	default:
4197 		/* we don't do these... */
4198 		return (0);
4199 	}
4200 }
4201 
/*
 * Debug aid: pretty-print a socket address (IPv4 or IPv6) including
 * its port (and scope id for IPv6).
 */
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];

	ip6buf[0] = 0;
#endif

	switch (sa->sa_family) {
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)sa;

			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
			    ip6_sprintf(ip6buf, &a6->sin6_addr),
			    ntohs(a6->sin6_port),
			    a6->sin6_scope_id);
			break;
		}
#endif
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *a4 = (struct sockaddr_in *)sa;
			unsigned char *b = (unsigned char *)&a4->sin_addr;

			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
			    b[0], b[1], b[2], b[3], ntohs(a4->sin_port));
			break;
		}
#endif
	default:
		/* unknown address family */
		SCTP_PRINTF("?\n");
		break;
	}
}
4243 
/*
 * Debug aid: print the source and destination addresses of a packet,
 * reconstructed from its IP header and SCTP common header.
 */
void
sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
{
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		{
			struct sockaddr_in lsa, fsa;

			/* build local (src) and foreign (dst) sockaddrs */
			bzero(&lsa, sizeof(lsa));
			lsa.sin_len = sizeof(lsa);
			lsa.sin_family = AF_INET;
			lsa.sin_addr = iph->ip_src;
			lsa.sin_port = sh->src_port;
			bzero(&fsa, sizeof(fsa));
			fsa.sin_len = sizeof(fsa);
			fsa.sin_family = AF_INET;
			fsa.sin_addr = iph->ip_dst;
			fsa.sin_port = sh->dest_port;
			SCTP_PRINTF("src: ");
			sctp_print_address((struct sockaddr *)&lsa);
			SCTP_PRINTF("dest: ");
			sctp_print_address((struct sockaddr *)&fsa);
			break;
		}
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		{
			struct ip6_hdr *ip6;
			struct sockaddr_in6 lsa6, fsa6;

			ip6 = (struct ip6_hdr *)iph;
			bzero(&lsa6, sizeof(lsa6));
			lsa6.sin6_len = sizeof(lsa6);
			lsa6.sin6_family = AF_INET6;
			lsa6.sin6_addr = ip6->ip6_src;
			lsa6.sin6_port = sh->src_port;
			bzero(&fsa6, sizeof(fsa6));
			fsa6.sin6_len = sizeof(fsa6);
			fsa6.sin6_family = AF_INET6;
			fsa6.sin6_addr = ip6->ip6_dst;
			fsa6.sin6_port = sh->dest_port;
			SCTP_PRINTF("src: ");
			sctp_print_address((struct sockaddr *)&lsa6);
			SCTP_PRINTF("dest: ");
			sctp_print_address((struct sockaddr *)&fsa6);
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
}
4299 
/*
 * Move all queued read-control structures belonging to stcb from the
 * old endpoint's read queue to the new endpoint's (peeloff/accept
 * path), transferring the socket-buffer accounting along with them.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* sblock serializes against a concurrent reader of the old socket */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit the old socket's receive-buffer accounting */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit the new socket's receive-buffer accounting */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4375 
/*
 * Append a complete read-control structure (and its mbuf chain) to the
 * endpoint's read queue, charging the socket buffer so select()/poll()
 * see the data, and wake any reader.  Takes ownership of "control":
 * when the socket can no longer read, the control and its data are
 * freed here instead of queued.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader is gone: drop the control and its data */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications are not counted as user data receives */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* prune zero-length mbufs and charge the rest to the sockbuf */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		/* message is complete; no more data will be appended */
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* wake the reader (zero-copy event or plain sorwakeup) */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * refcount pins the TCB while its lock is
				 * dropped to honor the socket-before-TCB
				 * lock order
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4501 
4502 
/*
 * Append an mbuf chain to an existing read-control structure (partial
 * delivery API, or appending on the reassembly queue), updating the
 * control's length and the socket-buffer accounting when sb != NULL.
 * Returns 0 on success, -1 when control is NULL/complete or m is
 * empty.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* reader gone; nothing to append to */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* prune zero-length mbufs; charge the rest to the sockbuf */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake the reader (zero-copy event or plain sorwakeup) */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * refcount pins the TCB while its lock is dropped
			 * to honor the socket-before-TCB lock order
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4652 
4653 
4654 
4655 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4656  *************ALTERNATE ROUTING CODE
4657  */
4658 
4659 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4660  *************ALTERNATE ROUTING CODE
4661  */
4662 
4663 struct mbuf *
4664 sctp_generate_invmanparam(int err)
4665 {
4666 	/* Return a MBUF with a invalid mandatory parameter */
4667 	struct mbuf *m;
4668 
4669 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4670 	if (m) {
4671 		struct sctp_paramhdr *ph;
4672 
4673 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4674 		ph = mtod(m, struct sctp_paramhdr *);
4675 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4676 		ph->param_type = htons(err);
4677 	}
4678 	return (m);
4679 }
4680 
4681 #ifdef SCTP_MBCNT_LOGGING
/*
 * MBCNT-logging variant: release the output-queue accounting held by a
 * chunk being freed — the association's queued-chunk count and total
 * output size, and (for TCP-model sockets) the socket send buffer.
 * Both subtractions clamp at zero rather than underflowing.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		/* no data attached: nothing was ever charged */
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero to avoid unsigned underflow */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* TCP-model sockets also charge the send buffer directly */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4713 
4714 #endif
4715 
4716 int
4717 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4718     uint8_t sent, int so_locked
4719 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4720     SCTP_UNUSED
4721 #endif
4722 )
4723 {
4724 	struct sctp_stream_out *strq;
4725 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4726 	struct sctp_stream_queue_pending *sp;
4727 	uint16_t stream = 0, seq = 0;
4728 	uint8_t foundeom = 0;
4729 	int ret_sz = 0;
4730 	int notdone;
4731 	int do_wakeup_routine = 0;
4732 
4733 	stream = tp1->rec.data.stream_number;
4734 	seq = tp1->rec.data.stream_seq;
4735 	do {
4736 		ret_sz += tp1->book_size;
4737 		if (tp1->data != NULL) {
4738 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4739 				sctp_flight_size_decrease(tp1);
4740 				sctp_total_flight_decrease(stcb, tp1);
4741 			}
4742 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4743 			stcb->asoc.peers_rwnd += tp1->send_size;
4744 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4745 			if (sent) {
4746 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4747 			} else {
4748 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4749 			}
4750 			if (tp1->data) {
4751 				sctp_m_freem(tp1->data);
4752 				tp1->data = NULL;
4753 			}
4754 			do_wakeup_routine = 1;
4755 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4756 				stcb->asoc.sent_queue_cnt_removeable--;
4757 			}
4758 		}
4759 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4760 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4761 		    SCTP_DATA_NOT_FRAG) {
4762 			/* not frag'ed we ae done   */
4763 			notdone = 0;
4764 			foundeom = 1;
4765 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4766 			/* end of frag, we are done */
4767 			notdone = 0;
4768 			foundeom = 1;
4769 		} else {
4770 			/*
4771 			 * Its a begin or middle piece, we must mark all of
4772 			 * it
4773 			 */
4774 			notdone = 1;
4775 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4776 		}
4777 	} while (tp1 && notdone);
4778 	if (foundeom == 0) {
4779 		/*
4780 		 * The multi-part message was scattered across the send and
4781 		 * sent queue.
4782 		 */
4783 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4784 			if ((tp1->rec.data.stream_number != stream) ||
4785 			    (tp1->rec.data.stream_seq != seq)) {
4786 				break;
4787 			}
4788 			/*
4789 			 * save to chk in case we have some on stream out
4790 			 * queue. If so and we have an un-transmitted one we
4791 			 * don't have to fudge the TSN.
4792 			 */
4793 			chk = tp1;
4794 			ret_sz += tp1->book_size;
4795 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4796 			if (sent) {
4797 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4798 			} else {
4799 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4800 			}
4801 			if (tp1->data) {
4802 				sctp_m_freem(tp1->data);
4803 				tp1->data = NULL;
4804 			}
4805 			/* No flight involved here book the size to 0 */
4806 			tp1->book_size = 0;
4807 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4808 				foundeom = 1;
4809 			}
4810 			do_wakeup_routine = 1;
4811 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4812 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4813 			/*
4814 			 * on to the sent queue so we can wait for it to be
4815 			 * passed by.
4816 			 */
4817 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4818 			    sctp_next);
4819 			stcb->asoc.send_queue_cnt--;
4820 			stcb->asoc.sent_queue_cnt++;
4821 		}
4822 	}
4823 	if (foundeom == 0) {
4824 		/*
4825 		 * Still no eom found. That means there is stuff left on the
4826 		 * stream out queue.. yuck.
4827 		 */
4828 		strq = &stcb->asoc.strmout[stream];
4829 		SCTP_TCB_SEND_LOCK(stcb);
4830 		TAILQ_FOREACH(sp, &strq->outqueue, next) {
4831 			/* FIXME: Shouldn't this be a serial number check? */
4832 			if (sp->strseq > seq) {
4833 				break;
4834 			}
4835 			/* Check if its our SEQ */
4836 			if (sp->strseq == seq) {
4837 				sp->discard_rest = 1;
4838 				/*
4839 				 * We may need to put a chunk on the queue
4840 				 * that holds the TSN that would have been
4841 				 * sent with the LAST bit.
4842 				 */
4843 				if (chk == NULL) {
4844 					/* Yep, we have to */
4845 					sctp_alloc_a_chunk(stcb, chk);
4846 					if (chk == NULL) {
4847 						/*
4848 						 * we are hosed. All we can
4849 						 * do is nothing.. which
4850 						 * will cause an abort if
4851 						 * the peer is paying
4852 						 * attention.
4853 						 */
4854 						goto oh_well;
4855 					}
4856 					memset(chk, 0, sizeof(*chk));
4857 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4858 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4859 					chk->asoc = &stcb->asoc;
4860 					chk->rec.data.stream_seq = sp->strseq;
4861 					chk->rec.data.stream_number = sp->stream;
4862 					chk->rec.data.payloadtype = sp->ppid;
4863 					chk->rec.data.context = sp->context;
4864 					chk->flags = sp->act_flags;
4865 					if (sp->net)
4866 						chk->whoTo = sp->net;
4867 					else
4868 						chk->whoTo = stcb->asoc.primary_destination;
4869 					atomic_add_int(&chk->whoTo->ref_count, 1);
4870 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4871 					stcb->asoc.pr_sctp_cnt++;
4872 					chk->pr_sctp_on = 1;
4873 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4874 					stcb->asoc.sent_queue_cnt++;
4875 					stcb->asoc.pr_sctp_cnt++;
4876 				} else {
4877 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4878 				}
4879 		oh_well:
4880 				if (sp->data) {
4881 					/*
4882 					 * Pull any data to free up the SB
4883 					 * and allow sender to "add more"
4884 					 * whilc we will throw away :-)
4885 					 */
4886 					sctp_free_spbufspace(stcb, &stcb->asoc,
4887 					    sp);
4888 					ret_sz += sp->length;
4889 					do_wakeup_routine = 1;
4890 					sp->some_taken = 1;
4891 					sctp_m_freem(sp->data);
4892 					sp->length = 0;
4893 					sp->data = NULL;
4894 					sp->tail_mbuf = NULL;
4895 				}
4896 				break;
4897 			}
4898 		}		/* End tailq_foreach */
4899 		SCTP_TCB_SEND_UNLOCK(stcb);
4900 	}
4901 	if (do_wakeup_routine) {
4902 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4903 		struct socket *so;
4904 
4905 		so = SCTP_INP_SO(stcb->sctp_ep);
4906 		if (!so_locked) {
4907 			atomic_add_int(&stcb->asoc.refcnt, 1);
4908 			SCTP_TCB_UNLOCK(stcb);
4909 			SCTP_SOCKET_LOCK(so, 1);
4910 			SCTP_TCB_LOCK(stcb);
4911 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4912 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4913 				/* assoc was freed while we were unlocked */
4914 				SCTP_SOCKET_UNLOCK(so, 1);
4915 				return (ret_sz);
4916 			}
4917 		}
4918 #endif
4919 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4920 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4921 		if (!so_locked) {
4922 			SCTP_SOCKET_UNLOCK(so, 1);
4923 		}
4924 #endif
4925 	}
4926 	return (ret_sz);
4927 }
4928 
4929 /*
4930  * checks to see if the given address, sa, is one that is currently known by
4931  * the kernel note: can't distinguish the same address on multiple interfaces
4932  * and doesn't handle multiple addresses with different zone/scope id's note:
4933  * ifa_ifwithaddr() compares the entire sockaddr struct
4934  */
4935 struct sctp_ifa *
4936 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4937     int holds_lock)
4938 {
4939 	struct sctp_laddr *laddr;
4940 
4941 	if (holds_lock == 0) {
4942 		SCTP_INP_RLOCK(inp);
4943 	}
4944 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4945 		if (laddr->ifa == NULL)
4946 			continue;
4947 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4948 			continue;
4949 #ifdef INET
4950 		if (addr->sa_family == AF_INET) {
4951 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4952 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4953 				/* found him. */
4954 				if (holds_lock == 0) {
4955 					SCTP_INP_RUNLOCK(inp);
4956 				}
4957 				return (laddr->ifa);
4958 				break;
4959 			}
4960 		}
4961 #endif
4962 #ifdef INET6
4963 		if (addr->sa_family == AF_INET6) {
4964 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4965 			    &laddr->ifa->address.sin6)) {
4966 				/* found him. */
4967 				if (holds_lock == 0) {
4968 					SCTP_INP_RUNLOCK(inp);
4969 				}
4970 				return (laddr->ifa);
4971 				break;
4972 			}
4973 		}
4974 #endif
4975 	}
4976 	if (holds_lock == 0) {
4977 		SCTP_INP_RUNLOCK(inp);
4978 	}
4979 	return (NULL);
4980 }
4981 
/*
 * Compute the hash value used to index an address into the VRF address
 * hash table.  The upper and lower 16 bits are folded together so that
 * addresses differing only in their low bits still spread across buckets.
 * Unsupported address families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		/*
		 * Was "case INET6:": INET6 is the kernel option macro
		 * (expands to 1), not the address family constant, so the
		 * IPv6 branch was never taken and every IPv6 address
		 * hashed to 0 (one overloaded bucket).
		 */
		{
			struct sockaddr_in6 *sin6;
			uint32_t hash_of_addr;

			sin6 = (struct sockaddr_in6 *)addr;
			/* Sum the four 32-bit words, then fold the halves. */
			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3]);
			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
			return (hash_of_addr);
		}
#endif
	default:
		break;
	}
	return (0);
}
5015 
5016 struct sctp_ifa *
5017 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5018 {
5019 	struct sctp_ifa *sctp_ifap;
5020 	struct sctp_vrf *vrf;
5021 	struct sctp_ifalist *hash_head;
5022 	uint32_t hash_of_addr;
5023 
5024 	if (holds_lock == 0)
5025 		SCTP_IPI_ADDR_RLOCK();
5026 
5027 	vrf = sctp_find_vrf(vrf_id);
5028 	if (vrf == NULL) {
5029 stage_right:
5030 		if (holds_lock == 0)
5031 			SCTP_IPI_ADDR_RUNLOCK();
5032 		return (NULL);
5033 	}
5034 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5035 
5036 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5037 	if (hash_head == NULL) {
5038 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5039 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5040 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5041 		sctp_print_address(addr);
5042 		SCTP_PRINTF("No such bucket for address\n");
5043 		if (holds_lock == 0)
5044 			SCTP_IPI_ADDR_RUNLOCK();
5045 
5046 		return (NULL);
5047 	}
5048 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5049 		if (sctp_ifap == NULL) {
5050 #ifdef INVARIANTS
5051 			panic("Huh LIST_FOREACH corrupt");
5052 			goto stage_right;
5053 #else
5054 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
5055 			goto stage_right;
5056 #endif
5057 		}
5058 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5059 			continue;
5060 #ifdef INET
5061 		if (addr->sa_family == AF_INET) {
5062 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5063 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5064 				/* found him. */
5065 				if (holds_lock == 0)
5066 					SCTP_IPI_ADDR_RUNLOCK();
5067 				return (sctp_ifap);
5068 				break;
5069 			}
5070 		}
5071 #endif
5072 #ifdef INET6
5073 		if (addr->sa_family == AF_INET6) {
5074 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5075 			    &sctp_ifap->address.sin6)) {
5076 				/* found him. */
5077 				if (holds_lock == 0)
5078 					SCTP_IPI_ADDR_RUNLOCK();
5079 				return (sctp_ifap);
5080 				break;
5081 			}
5082 		}
5083 #endif
5084 	}
5085 	if (holds_lock == 0)
5086 		SCTP_IPI_ADDR_RUNLOCK();
5087 	return (NULL);
5088 }
5089 
/*
 * Called after the user has pulled data off the socket to decide whether
 * the peer should be told about the newly-opened receive window.  If the
 * window has grown by at least rwnd_req bytes, send a window-update SACK
 * immediately; otherwise just accumulate the freed byte count in the TCB
 * for next time.  *freed_so_far is consumed (zeroed) on the paths that
 * account it.  hold_rlock indicates the caller owns the INP read lock,
 * which must be dropped around sending (and is reacquired before return).
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the assoc can't be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Pin the endpoint as well before touching its socket. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed byte count into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Window grew enough to be worth advertising: drop the
		 * read lock (sending must not hold it) and take the TCB
		 * lock instead.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check now that we hold the TCB lock. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Restore the read lock if we dropped it for the caller. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5171 
5172 int
5173 sctp_sorecvmsg(struct socket *so,
5174     struct uio *uio,
5175     struct mbuf **mp,
5176     struct sockaddr *from,
5177     int fromlen,
5178     int *msg_flags,
5179     struct sctp_sndrcvinfo *sinfo,
5180     int filling_sinfo)
5181 {
5182 	/*
5183 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5184 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5185 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5186 	 * On the way out we may send out any combination of:
5187 	 * MSG_NOTIFICATION MSG_EOR
5188 	 *
5189 	 */
5190 	struct sctp_inpcb *inp = NULL;
5191 	int my_len = 0;
5192 	int cp_len = 0, error = 0;
5193 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5194 	struct mbuf *m = NULL;
5195 	struct sctp_tcb *stcb = NULL;
5196 	int wakeup_read_socket = 0;
5197 	int freecnt_applied = 0;
5198 	int out_flags = 0, in_flags = 0;
5199 	int block_allowed = 1;
5200 	uint32_t freed_so_far = 0;
5201 	uint32_t copied_so_far = 0;
5202 	int in_eeor_mode = 0;
5203 	int no_rcv_needed = 0;
5204 	uint32_t rwnd_req = 0;
5205 	int hold_sblock = 0;
5206 	int hold_rlock = 0;
5207 	int slen = 0;
5208 	uint32_t held_length = 0;
5209 	int sockbuf_lock = 0;
5210 
5211 	if (uio == NULL) {
5212 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5213 		return (EINVAL);
5214 	}
5215 	if (msg_flags) {
5216 		in_flags = *msg_flags;
5217 		if (in_flags & MSG_PEEK)
5218 			SCTP_STAT_INCR(sctps_read_peeks);
5219 	} else {
5220 		in_flags = 0;
5221 	}
5222 	slen = uio->uio_resid;
5223 
5224 	/* Pull in and set up our int flags */
5225 	if (in_flags & MSG_OOB) {
5226 		/* Out of band's NOT supported */
5227 		return (EOPNOTSUPP);
5228 	}
5229 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5230 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5231 		return (EINVAL);
5232 	}
5233 	if ((in_flags & (MSG_DONTWAIT
5234 	    | MSG_NBIO
5235 	    )) ||
5236 	    SCTP_SO_IS_NBIO(so)) {
5237 		block_allowed = 0;
5238 	}
5239 	/* setup the endpoint */
5240 	inp = (struct sctp_inpcb *)so->so_pcb;
5241 	if (inp == NULL) {
5242 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5243 		return (EFAULT);
5244 	}
5245 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5246 	/* Must be at least a MTU's worth */
5247 	if (rwnd_req < SCTP_MIN_RWND)
5248 		rwnd_req = SCTP_MIN_RWND;
5249 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5250 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5251 		sctp_misc_ints(SCTP_SORECV_ENTER,
5252 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5253 	}
5254 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5255 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5256 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5257 	}
5258 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5259 	sockbuf_lock = 1;
5260 	if (error) {
5261 		goto release_unlocked;
5262 	}
5263 restart:
5264 
5265 
5266 restart_nosblocks:
5267 	if (hold_sblock == 0) {
5268 		SOCKBUF_LOCK(&so->so_rcv);
5269 		hold_sblock = 1;
5270 	}
5271 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5272 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5273 		goto out;
5274 	}
5275 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5276 		if (so->so_error) {
5277 			error = so->so_error;
5278 			if ((in_flags & MSG_PEEK) == 0)
5279 				so->so_error = 0;
5280 			goto out;
5281 		} else {
5282 			if (so->so_rcv.sb_cc == 0) {
5283 				/* indicate EOF */
5284 				error = 0;
5285 				goto out;
5286 			}
5287 		}
5288 	}
5289 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5290 		/* we need to wait for data */
5291 		if ((so->so_rcv.sb_cc == 0) &&
5292 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5293 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5294 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5295 				/*
5296 				 * For active open side clear flags for
5297 				 * re-use passive open is blocked by
5298 				 * connect.
5299 				 */
5300 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5301 					/*
5302 					 * You were aborted, passive side
5303 					 * always hits here
5304 					 */
5305 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5306 					error = ECONNRESET;
5307 				}
5308 				so->so_state &= ~(SS_ISCONNECTING |
5309 				    SS_ISDISCONNECTING |
5310 				    SS_ISCONFIRMING |
5311 				    SS_ISCONNECTED);
5312 				if (error == 0) {
5313 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5314 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5315 						error = ENOTCONN;
5316 					}
5317 				}
5318 				goto out;
5319 			}
5320 		}
5321 		error = sbwait(&so->so_rcv);
5322 		if (error) {
5323 			goto out;
5324 		}
5325 		held_length = 0;
5326 		goto restart_nosblocks;
5327 	} else if (so->so_rcv.sb_cc == 0) {
5328 		if (so->so_error) {
5329 			error = so->so_error;
5330 			if ((in_flags & MSG_PEEK) == 0)
5331 				so->so_error = 0;
5332 		} else {
5333 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5334 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5335 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5336 					/*
5337 					 * For active open side clear flags
5338 					 * for re-use passive open is
5339 					 * blocked by connect.
5340 					 */
5341 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5342 						/*
5343 						 * You were aborted, passive
5344 						 * side always hits here
5345 						 */
5346 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5347 						error = ECONNRESET;
5348 					}
5349 					so->so_state &= ~(SS_ISCONNECTING |
5350 					    SS_ISDISCONNECTING |
5351 					    SS_ISCONFIRMING |
5352 					    SS_ISCONNECTED);
5353 					if (error == 0) {
5354 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5355 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5356 							error = ENOTCONN;
5357 						}
5358 					}
5359 					goto out;
5360 				}
5361 			}
5362 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5363 			error = EWOULDBLOCK;
5364 		}
5365 		goto out;
5366 	}
5367 	if (hold_sblock == 1) {
5368 		SOCKBUF_UNLOCK(&so->so_rcv);
5369 		hold_sblock = 0;
5370 	}
5371 	/* we possibly have data we can read */
5372 	/* sa_ignore FREED_MEMORY */
5373 	control = TAILQ_FIRST(&inp->read_queue);
5374 	if (control == NULL) {
5375 		/*
5376 		 * This could be happening since the appender did the
5377 		 * increment but as not yet did the tailq insert onto the
5378 		 * read_queue
5379 		 */
5380 		if (hold_rlock == 0) {
5381 			SCTP_INP_READ_LOCK(inp);
5382 		}
5383 		control = TAILQ_FIRST(&inp->read_queue);
5384 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5385 #ifdef INVARIANTS
5386 			panic("Huh, its non zero and nothing on control?");
5387 #endif
5388 			so->so_rcv.sb_cc = 0;
5389 		}
5390 		SCTP_INP_READ_UNLOCK(inp);
5391 		hold_rlock = 0;
5392 		goto restart;
5393 	}
5394 	if ((control->length == 0) &&
5395 	    (control->do_not_ref_stcb)) {
5396 		/*
5397 		 * Clean up code for freeing assoc that left behind a
5398 		 * pdapi.. maybe a peer in EEOR that just closed after
5399 		 * sending and never indicated a EOR.
5400 		 */
5401 		if (hold_rlock == 0) {
5402 			hold_rlock = 1;
5403 			SCTP_INP_READ_LOCK(inp);
5404 		}
5405 		control->held_length = 0;
5406 		if (control->data) {
5407 			/* Hmm there is data here .. fix */
5408 			struct mbuf *m_tmp;
5409 			int cnt = 0;
5410 
5411 			m_tmp = control->data;
5412 			while (m_tmp) {
5413 				cnt += SCTP_BUF_LEN(m_tmp);
5414 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5415 					control->tail_mbuf = m_tmp;
5416 					control->end_added = 1;
5417 				}
5418 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5419 			}
5420 			control->length = cnt;
5421 		} else {
5422 			/* remove it */
5423 			TAILQ_REMOVE(&inp->read_queue, control, next);
5424 			/* Add back any hiddend data */
5425 			sctp_free_remote_addr(control->whoFrom);
5426 			sctp_free_a_readq(stcb, control);
5427 		}
5428 		if (hold_rlock) {
5429 			hold_rlock = 0;
5430 			SCTP_INP_READ_UNLOCK(inp);
5431 		}
5432 		goto restart;
5433 	}
5434 	if ((control->length == 0) &&
5435 	    (control->end_added == 1)) {
5436 		/*
5437 		 * Do we also need to check for (control->pdapi_aborted ==
5438 		 * 1)?
5439 		 */
5440 		if (hold_rlock == 0) {
5441 			hold_rlock = 1;
5442 			SCTP_INP_READ_LOCK(inp);
5443 		}
5444 		TAILQ_REMOVE(&inp->read_queue, control, next);
5445 		if (control->data) {
5446 #ifdef INVARIANTS
5447 			panic("control->data not null but control->length == 0");
5448 #else
5449 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5450 			sctp_m_freem(control->data);
5451 			control->data = NULL;
5452 #endif
5453 		}
5454 		if (control->aux_data) {
5455 			sctp_m_free(control->aux_data);
5456 			control->aux_data = NULL;
5457 		}
5458 		sctp_free_remote_addr(control->whoFrom);
5459 		sctp_free_a_readq(stcb, control);
5460 		if (hold_rlock) {
5461 			hold_rlock = 0;
5462 			SCTP_INP_READ_UNLOCK(inp);
5463 		}
5464 		goto restart;
5465 	}
5466 	if (control->length == 0) {
5467 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5468 		    (filling_sinfo)) {
5469 			/* find a more suitable one then this */
5470 			ctl = TAILQ_NEXT(control, next);
5471 			while (ctl) {
5472 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5473 				    (ctl->some_taken ||
5474 				    (ctl->spec_flags & M_NOTIFICATION) ||
5475 				    ((ctl->do_not_ref_stcb == 0) &&
5476 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5477 				    ) {
5478 					/*-
5479 					 * If we have a different TCB next, and there is data
5480 					 * present. If we have already taken some (pdapi), OR we can
5481 					 * ref the tcb and no delivery as started on this stream, we
5482 					 * take it. Note we allow a notification on a different
5483 					 * assoc to be delivered..
5484 					 */
5485 					control = ctl;
5486 					goto found_one;
5487 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5488 					    (ctl->length) &&
5489 					    ((ctl->some_taken) ||
5490 					    ((ctl->do_not_ref_stcb == 0) &&
5491 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5492 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5493 					/*-
5494 					 * If we have the same tcb, and there is data present, and we
5495 					 * have the strm interleave feature present. Then if we have
5496 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5497 					 * not started a delivery for this stream, we can take it.
5498 					 * Note we do NOT allow a notificaiton on the same assoc to
5499 					 * be delivered.
5500 					 */
5501 					control = ctl;
5502 					goto found_one;
5503 				}
5504 				ctl = TAILQ_NEXT(ctl, next);
5505 			}
5506 		}
5507 		/*
5508 		 * if we reach here, not suitable replacement is available
5509 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5510 		 * into the our held count, and its time to sleep again.
5511 		 */
5512 		held_length = so->so_rcv.sb_cc;
5513 		control->held_length = so->so_rcv.sb_cc;
5514 		goto restart;
5515 	}
5516 	/* Clear the held length since there is something to read */
5517 	control->held_length = 0;
5518 	if (hold_rlock) {
5519 		SCTP_INP_READ_UNLOCK(inp);
5520 		hold_rlock = 0;
5521 	}
5522 found_one:
5523 	/*
5524 	 * If we reach here, control has a some data for us to read off.
5525 	 * Note that stcb COULD be NULL.
5526 	 */
5527 	control->some_taken++;
5528 	if (hold_sblock) {
5529 		SOCKBUF_UNLOCK(&so->so_rcv);
5530 		hold_sblock = 0;
5531 	}
5532 	stcb = control->stcb;
5533 	if (stcb) {
5534 		if ((control->do_not_ref_stcb == 0) &&
5535 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5536 			if (freecnt_applied == 0)
5537 				stcb = NULL;
5538 		} else if (control->do_not_ref_stcb == 0) {
5539 			/* you can't free it on me please */
5540 			/*
5541 			 * The lock on the socket buffer protects us so the
5542 			 * free code will stop. But since we used the
5543 			 * socketbuf lock and the sender uses the tcb_lock
5544 			 * to increment, we need to use the atomic add to
5545 			 * the refcnt
5546 			 */
5547 			if (freecnt_applied) {
5548 #ifdef INVARIANTS
5549 				panic("refcnt already incremented");
5550 #else
5551 				SCTP_PRINTF("refcnt already incremented?\n");
5552 #endif
5553 			} else {
5554 				atomic_add_int(&stcb->asoc.refcnt, 1);
5555 				freecnt_applied = 1;
5556 			}
5557 			/*
5558 			 * Setup to remember how much we have not yet told
5559 			 * the peer our rwnd has opened up. Note we grab the
5560 			 * value from the tcb from last time. Note too that
5561 			 * sack sending clears this when a sack is sent,
5562 			 * which is fine. Once we hit the rwnd_req, we then
5563 			 * will go to the sctp_user_rcvd() that will not
5564 			 * lock until it KNOWs it MUST send a WUP-SACK.
5565 			 */
5566 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5567 			stcb->freed_by_sorcv_sincelast = 0;
5568 		}
5569 	}
5570 	if (stcb &&
5571 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5572 	    control->do_not_ref_stcb == 0) {
5573 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5574 	}
5575 	/* First lets get off the sinfo and sockaddr info */
5576 	if ((sinfo) && filling_sinfo) {
5577 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5578 		nxt = TAILQ_NEXT(control, next);
5579 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5580 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5581 			struct sctp_extrcvinfo *s_extra;
5582 
5583 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5584 			if ((nxt) &&
5585 			    (nxt->length)) {
5586 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5587 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5588 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5589 				}
5590 				if (nxt->spec_flags & M_NOTIFICATION) {
5591 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5592 				}
5593 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5594 				s_extra->sreinfo_next_length = nxt->length;
5595 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5596 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5597 				if (nxt->tail_mbuf != NULL) {
5598 					if (nxt->end_added) {
5599 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5600 					}
5601 				}
5602 			} else {
5603 				/*
5604 				 * we explicitly 0 this, since the memcpy
5605 				 * got some other things beyond the older
5606 				 * sinfo_ that is on the control's structure
5607 				 * :-D
5608 				 */
5609 				nxt = NULL;
5610 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5611 				s_extra->sreinfo_next_aid = 0;
5612 				s_extra->sreinfo_next_length = 0;
5613 				s_extra->sreinfo_next_ppid = 0;
5614 				s_extra->sreinfo_next_stream = 0;
5615 			}
5616 		}
5617 		/*
5618 		 * update off the real current cum-ack, if we have an stcb.
5619 		 */
5620 		if ((control->do_not_ref_stcb == 0) && stcb)
5621 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5622 		/*
5623 		 * mask off the high bits, we keep the actual chunk bits in
5624 		 * there.
5625 		 */
5626 		sinfo->sinfo_flags &= 0x00ff;
5627 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5628 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5629 		}
5630 	}
5631 #ifdef SCTP_ASOCLOG_OF_TSNS
5632 	{
5633 		int index, newindex;
5634 		struct sctp_pcbtsn_rlog *entry;
5635 
5636 		do {
5637 			index = inp->readlog_index;
5638 			newindex = index + 1;
5639 			if (newindex >= SCTP_READ_LOG_SIZE) {
5640 				newindex = 0;
5641 			}
5642 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5643 		entry = &inp->readlog[index];
5644 		entry->vtag = control->sinfo_assoc_id;
5645 		entry->strm = control->sinfo_stream;
5646 		entry->seq = control->sinfo_ssn;
5647 		entry->sz = control->length;
5648 		entry->flgs = control->sinfo_flags;
5649 	}
5650 #endif
5651 	if (fromlen && from) {
5652 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
5653 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5654 #ifdef INET6
5655 		case AF_INET6:
5656 			((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5657 			break;
5658 #endif
5659 #ifdef INET
5660 		case AF_INET:
5661 			((struct sockaddr_in *)from)->sin_port = control->port_from;
5662 			break;
5663 #endif
5664 		default:
5665 			break;
5666 		}
5667 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5668 
5669 #if defined(INET) && defined(INET6)
5670 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5671 		    (from->sa_family == AF_INET) &&
5672 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5673 			struct sockaddr_in *sin;
5674 			struct sockaddr_in6 sin6;
5675 
5676 			sin = (struct sockaddr_in *)from;
5677 			bzero(&sin6, sizeof(sin6));
5678 			sin6.sin6_family = AF_INET6;
5679 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5680 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5681 			bcopy(&sin->sin_addr,
5682 			    &sin6.sin6_addr.s6_addr32[3],
5683 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5684 			sin6.sin6_port = sin->sin_port;
5685 			memcpy(from, &sin6, sizeof(struct sockaddr_in6));
5686 		}
5687 #endif
5688 #if defined(INET6)
5689 		{
5690 			struct sockaddr_in6 lsa6, *from6;
5691 
5692 			from6 = (struct sockaddr_in6 *)from;
5693 			sctp_recover_scope_mac(from6, (&lsa6));
5694 		}
5695 #endif
5696 	}
5697 	/* now copy out what data we can */
5698 	if (mp == NULL) {
5699 		/* copy out each mbuf in the chain up to length */
5700 get_more_data:
5701 		m = control->data;
5702 		while (m) {
5703 			/* Move out all we can */
5704 			cp_len = (int)uio->uio_resid;
5705 			my_len = (int)SCTP_BUF_LEN(m);
5706 			if (cp_len > my_len) {
5707 				/* not enough in this buf */
5708 				cp_len = my_len;
5709 			}
5710 			if (hold_rlock) {
5711 				SCTP_INP_READ_UNLOCK(inp);
5712 				hold_rlock = 0;
5713 			}
5714 			if (cp_len > 0)
5715 				error = uiomove(mtod(m, char *), cp_len, uio);
5716 			/* re-read */
5717 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5718 				goto release;
5719 			}
5720 			if ((control->do_not_ref_stcb == 0) && stcb &&
5721 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5722 				no_rcv_needed = 1;
5723 			}
5724 			if (error) {
5725 				/* error we are out of here */
5726 				goto release;
5727 			}
5728 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5729 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5730 			    ((control->end_added == 0) ||
5731 			    (control->end_added &&
5732 			    (TAILQ_NEXT(control, next) == NULL)))
5733 			    ) {
5734 				SCTP_INP_READ_LOCK(inp);
5735 				hold_rlock = 1;
5736 			}
5737 			if (cp_len == SCTP_BUF_LEN(m)) {
5738 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5739 				    (control->end_added)) {
5740 					out_flags |= MSG_EOR;
5741 					if ((control->do_not_ref_stcb == 0) &&
5742 					    (control->stcb != NULL) &&
5743 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5744 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5745 				}
5746 				if (control->spec_flags & M_NOTIFICATION) {
5747 					out_flags |= MSG_NOTIFICATION;
5748 				}
5749 				/* we ate up the mbuf */
5750 				if (in_flags & MSG_PEEK) {
5751 					/* just looking */
5752 					m = SCTP_BUF_NEXT(m);
5753 					copied_so_far += cp_len;
5754 				} else {
5755 					/* dispose of the mbuf */
5756 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5757 						sctp_sblog(&so->so_rcv,
5758 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5759 					}
5760 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5761 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5762 						sctp_sblog(&so->so_rcv,
5763 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5764 					}
5765 					copied_so_far += cp_len;
5766 					freed_so_far += cp_len;
5767 					freed_so_far += MSIZE;
5768 					atomic_subtract_int(&control->length, cp_len);
5769 					control->data = sctp_m_free(m);
5770 					m = control->data;
5771 					/*
5772 					 * been through it all, must hold sb
5773 					 * lock ok to null tail
5774 					 */
5775 					if (control->data == NULL) {
5776 #ifdef INVARIANTS
5777 						if ((control->end_added == 0) ||
5778 						    (TAILQ_NEXT(control, next) == NULL)) {
5779 							/*
5780 							 * If the end is not
5781 							 * added, OR the
5782 							 * next is NOT null
5783 							 * we MUST have the
5784 							 * lock.
5785 							 */
5786 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5787 								panic("Hmm we don't own the lock?");
5788 							}
5789 						}
5790 #endif
5791 						control->tail_mbuf = NULL;
5792 #ifdef INVARIANTS
5793 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5794 							panic("end_added, nothing left and no MSG_EOR");
5795 						}
5796 #endif
5797 					}
5798 				}
5799 			} else {
5800 				/* Do we need to trim the mbuf? */
5801 				if (control->spec_flags & M_NOTIFICATION) {
5802 					out_flags |= MSG_NOTIFICATION;
5803 				}
5804 				if ((in_flags & MSG_PEEK) == 0) {
5805 					SCTP_BUF_RESV_UF(m, cp_len);
5806 					SCTP_BUF_LEN(m) -= cp_len;
5807 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5808 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5809 					}
5810 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5811 					if ((control->do_not_ref_stcb == 0) &&
5812 					    stcb) {
5813 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5814 					}
5815 					copied_so_far += cp_len;
5816 					freed_so_far += cp_len;
5817 					freed_so_far += MSIZE;
5818 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5819 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5820 						    SCTP_LOG_SBRESULT, 0);
5821 					}
5822 					atomic_subtract_int(&control->length, cp_len);
5823 				} else {
5824 					copied_so_far += cp_len;
5825 				}
5826 			}
5827 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5828 				break;
5829 			}
5830 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5831 			    (control->do_not_ref_stcb == 0) &&
5832 			    (freed_so_far >= rwnd_req)) {
5833 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5834 			}
5835 		}		/* end while(m) */
5836 		/*
5837 		 * At this point we have looked at it all and we either have
5838 		 * a MSG_EOR/or read all the user wants... <OR>
5839 		 * control->length == 0.
5840 		 */
5841 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5842 			/* we are done with this control */
5843 			if (control->length == 0) {
5844 				if (control->data) {
5845 #ifdef INVARIANTS
5846 					panic("control->data not null at read eor?");
5847 #else
5848 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5849 					sctp_m_freem(control->data);
5850 					control->data = NULL;
5851 #endif
5852 				}
5853 		done_with_control:
5854 				if (TAILQ_NEXT(control, next) == NULL) {
5855 					/*
5856 					 * If we don't have a next we need a
5857 					 * lock, if there is a next
5858 					 * interrupt is filling ahead of us
5859 					 * and we don't need a lock to
5860 					 * remove this guy (which is the
5861 					 * head of the queue).
5862 					 */
5863 					if (hold_rlock == 0) {
5864 						SCTP_INP_READ_LOCK(inp);
5865 						hold_rlock = 1;
5866 					}
5867 				}
5868 				TAILQ_REMOVE(&inp->read_queue, control, next);
5869 				/* Add back any hiddend data */
5870 				if (control->held_length) {
5871 					held_length = 0;
5872 					control->held_length = 0;
5873 					wakeup_read_socket = 1;
5874 				}
5875 				if (control->aux_data) {
5876 					sctp_m_free(control->aux_data);
5877 					control->aux_data = NULL;
5878 				}
5879 				no_rcv_needed = control->do_not_ref_stcb;
5880 				sctp_free_remote_addr(control->whoFrom);
5881 				control->data = NULL;
5882 				sctp_free_a_readq(stcb, control);
5883 				control = NULL;
5884 				if ((freed_so_far >= rwnd_req) &&
5885 				    (no_rcv_needed == 0))
5886 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5887 
5888 			} else {
5889 				/*
5890 				 * The user did not read all of this
5891 				 * message, turn off the returned MSG_EOR
5892 				 * since we are leaving more behind on the
5893 				 * control to read.
5894 				 */
5895 #ifdef INVARIANTS
5896 				if (control->end_added &&
5897 				    (control->data == NULL) &&
5898 				    (control->tail_mbuf == NULL)) {
5899 					panic("Gak, control->length is corrupt?");
5900 				}
5901 #endif
5902 				no_rcv_needed = control->do_not_ref_stcb;
5903 				out_flags &= ~MSG_EOR;
5904 			}
5905 		}
5906 		if (out_flags & MSG_EOR) {
5907 			goto release;
5908 		}
5909 		if ((uio->uio_resid == 0) ||
5910 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5911 		    ) {
5912 			goto release;
5913 		}
5914 		/*
5915 		 * If I hit here the receiver wants more and this message is
5916 		 * NOT done (pd-api). So two questions. Can we block? if not
5917 		 * we are done. Did the user NOT set MSG_WAITALL?
5918 		 */
5919 		if (block_allowed == 0) {
5920 			goto release;
5921 		}
5922 		/*
5923 		 * We need to wait for more data a few things: - We don't
5924 		 * sbunlock() so we don't get someone else reading. - We
5925 		 * must be sure to account for the case where what is added
5926 		 * is NOT to our control when we wakeup.
5927 		 */
5928 
5929 		/*
5930 		 * Do we need to tell the transport a rwnd update might be
5931 		 * needed before we go to sleep?
5932 		 */
5933 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5934 		    ((freed_so_far >= rwnd_req) &&
5935 		    (control->do_not_ref_stcb == 0) &&
5936 		    (no_rcv_needed == 0))) {
5937 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5938 		}
5939 wait_some_more:
5940 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5941 			goto release;
5942 		}
5943 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5944 			goto release;
5945 
5946 		if (hold_rlock == 1) {
5947 			SCTP_INP_READ_UNLOCK(inp);
5948 			hold_rlock = 0;
5949 		}
5950 		if (hold_sblock == 0) {
5951 			SOCKBUF_LOCK(&so->so_rcv);
5952 			hold_sblock = 1;
5953 		}
5954 		if ((copied_so_far) && (control->length == 0) &&
5955 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5956 			goto release;
5957 		}
5958 		if (so->so_rcv.sb_cc <= control->held_length) {
5959 			error = sbwait(&so->so_rcv);
5960 			if (error) {
5961 				goto release;
5962 			}
5963 			control->held_length = 0;
5964 		}
5965 		if (hold_sblock) {
5966 			SOCKBUF_UNLOCK(&so->so_rcv);
5967 			hold_sblock = 0;
5968 		}
5969 		if (control->length == 0) {
5970 			/* still nothing here */
5971 			if (control->end_added == 1) {
5972 				/* he aborted, or is done i.e.did a shutdown */
5973 				out_flags |= MSG_EOR;
5974 				if (control->pdapi_aborted) {
5975 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5976 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5977 
5978 					out_flags |= MSG_TRUNC;
5979 				} else {
5980 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5981 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5982 				}
5983 				goto done_with_control;
5984 			}
5985 			if (so->so_rcv.sb_cc > held_length) {
5986 				control->held_length = so->so_rcv.sb_cc;
5987 				held_length = 0;
5988 			}
5989 			goto wait_some_more;
5990 		} else if (control->data == NULL) {
5991 			/*
5992 			 * we must re-sync since data is probably being
5993 			 * added
5994 			 */
5995 			SCTP_INP_READ_LOCK(inp);
5996 			if ((control->length > 0) && (control->data == NULL)) {
5997 				/*
5998 				 * big trouble.. we have the lock and its
5999 				 * corrupt?
6000 				 */
6001 #ifdef INVARIANTS
6002 				panic("Impossible data==NULL length !=0");
6003 #endif
6004 				out_flags |= MSG_EOR;
6005 				out_flags |= MSG_TRUNC;
6006 				control->length = 0;
6007 				SCTP_INP_READ_UNLOCK(inp);
6008 				goto done_with_control;
6009 			}
6010 			SCTP_INP_READ_UNLOCK(inp);
6011 			/* We will fall around to get more data */
6012 		}
6013 		goto get_more_data;
6014 	} else {
6015 		/*-
6016 		 * Give caller back the mbuf chain,
6017 		 * store in uio_resid the length
6018 		 */
6019 		wakeup_read_socket = 0;
6020 		if ((control->end_added == 0) ||
6021 		    (TAILQ_NEXT(control, next) == NULL)) {
6022 			/* Need to get rlock */
6023 			if (hold_rlock == 0) {
6024 				SCTP_INP_READ_LOCK(inp);
6025 				hold_rlock = 1;
6026 			}
6027 		}
6028 		if (control->end_added) {
6029 			out_flags |= MSG_EOR;
6030 			if ((control->do_not_ref_stcb == 0) &&
6031 			    (control->stcb != NULL) &&
6032 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6033 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6034 		}
6035 		if (control->spec_flags & M_NOTIFICATION) {
6036 			out_flags |= MSG_NOTIFICATION;
6037 		}
6038 		uio->uio_resid = control->length;
6039 		*mp = control->data;
6040 		m = control->data;
6041 		while (m) {
6042 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6043 				sctp_sblog(&so->so_rcv,
6044 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6045 			}
6046 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6047 			freed_so_far += SCTP_BUF_LEN(m);
6048 			freed_so_far += MSIZE;
6049 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6050 				sctp_sblog(&so->so_rcv,
6051 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6052 			}
6053 			m = SCTP_BUF_NEXT(m);
6054 		}
6055 		control->data = control->tail_mbuf = NULL;
6056 		control->length = 0;
6057 		if (out_flags & MSG_EOR) {
6058 			/* Done with this control */
6059 			goto done_with_control;
6060 		}
6061 	}
6062 release:
6063 	if (hold_rlock == 1) {
6064 		SCTP_INP_READ_UNLOCK(inp);
6065 		hold_rlock = 0;
6066 	}
6067 	if (hold_sblock == 1) {
6068 		SOCKBUF_UNLOCK(&so->so_rcv);
6069 		hold_sblock = 0;
6070 	}
6071 	sbunlock(&so->so_rcv);
6072 	sockbuf_lock = 0;
6073 
6074 release_unlocked:
6075 	if (hold_sblock) {
6076 		SOCKBUF_UNLOCK(&so->so_rcv);
6077 		hold_sblock = 0;
6078 	}
6079 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6080 		if ((freed_so_far >= rwnd_req) &&
6081 		    (control && (control->do_not_ref_stcb == 0)) &&
6082 		    (no_rcv_needed == 0))
6083 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6084 	}
6085 out:
6086 	if (msg_flags) {
6087 		*msg_flags = out_flags;
6088 	}
6089 	if (((out_flags & MSG_EOR) == 0) &&
6090 	    ((in_flags & MSG_PEEK) == 0) &&
6091 	    (sinfo) &&
6092 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6093 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6094 		struct sctp_extrcvinfo *s_extra;
6095 
6096 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6097 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6098 	}
6099 	if (hold_rlock == 1) {
6100 		SCTP_INP_READ_UNLOCK(inp);
6101 	}
6102 	if (hold_sblock) {
6103 		SOCKBUF_UNLOCK(&so->so_rcv);
6104 	}
6105 	if (sockbuf_lock) {
6106 		sbunlock(&so->so_rcv);
6107 	}
6108 	if (freecnt_applied) {
6109 		/*
6110 		 * The lock on the socket buffer protects us so the free
6111 		 * code will stop. But since we used the socketbuf lock and
6112 		 * the sender uses the tcb_lock to increment, we need to use
6113 		 * the atomic add to the refcnt.
6114 		 */
6115 		if (stcb == NULL) {
6116 #ifdef INVARIANTS
6117 			panic("stcb for refcnt has gone NULL?");
6118 			goto stage_left;
6119 #else
6120 			goto stage_left;
6121 #endif
6122 		}
6123 		atomic_add_int(&stcb->asoc.refcnt, -1);
6124 		/* Save the value back for next time */
6125 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6126 	}
6127 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6128 		if (stcb) {
6129 			sctp_misc_ints(SCTP_SORECV_DONE,
6130 			    freed_so_far,
6131 			    ((uio) ? (slen - uio->uio_resid) : slen),
6132 			    stcb->asoc.my_rwnd,
6133 			    so->so_rcv.sb_cc);
6134 		} else {
6135 			sctp_misc_ints(SCTP_SORECV_DONE,
6136 			    freed_so_far,
6137 			    ((uio) ? (slen - uio->uio_resid) : slen),
6138 			    0,
6139 			    so->so_rcv.sb_cc);
6140 		}
6141 	}
6142 stage_left:
6143 	if (wakeup_read_socket) {
6144 		sctp_sorwakeup(inp, so);
6145 	}
6146 	return (error);
6147 }
6148 
6149 
6150 #ifdef SCTP_MBUF_LOGGING
6151 struct mbuf *
6152 sctp_m_free(struct mbuf *m)
6153 {
6154 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6155 		if (SCTP_BUF_IS_EXTENDED(m)) {
6156 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6157 		}
6158 	}
6159 	return (m_free(m));
6160 }
6161 
6162 void
6163 sctp_m_freem(struct mbuf *mb)
6164 {
6165 	while (mb != NULL)
6166 		mb = sctp_m_free(mb);
6167 }
6168 
6169 #endif
6170 
6171 int
6172 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6173 {
6174 	/*
6175 	 * Given a local address. For all associations that holds the
6176 	 * address, request a peer-set-primary.
6177 	 */
6178 	struct sctp_ifa *ifa;
6179 	struct sctp_laddr *wi;
6180 
6181 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6182 	if (ifa == NULL) {
6183 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6184 		return (EADDRNOTAVAIL);
6185 	}
6186 	/*
6187 	 * Now that we have the ifa we must awaken the iterator with this
6188 	 * message.
6189 	 */
6190 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6191 	if (wi == NULL) {
6192 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6193 		return (ENOMEM);
6194 	}
6195 	/* Now incr the count and int wi structure */
6196 	SCTP_INCR_LADDR_COUNT();
6197 	bzero(wi, sizeof(*wi));
6198 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6199 	wi->ifa = ifa;
6200 	wi->action = SCTP_SET_PRIM_ADDR;
6201 	atomic_add_int(&ifa->refcount, 1);
6202 
6203 	/* Now add it to the work queue */
6204 	SCTP_WQ_ADDR_LOCK();
6205 	/*
6206 	 * Should this really be a tailq? As it is we will process the
6207 	 * newest first :-0
6208 	 */
6209 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6210 	SCTP_WQ_ADDR_UNLOCK();
6211 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6212 	    (struct sctp_inpcb *)NULL,
6213 	    (struct sctp_tcb *)NULL,
6214 	    (struct sctp_nets *)NULL);
6215 	return (0);
6216 }
6217 
6218 
6219 int
6220 sctp_soreceive(struct socket *so,
6221     struct sockaddr **psa,
6222     struct uio *uio,
6223     struct mbuf **mp0,
6224     struct mbuf **controlp,
6225     int *flagsp)
6226 {
6227 	int error, fromlen;
6228 	uint8_t sockbuf[256];
6229 	struct sockaddr *from;
6230 	struct sctp_extrcvinfo sinfo;
6231 	int filling_sinfo = 1;
6232 	struct sctp_inpcb *inp;
6233 
6234 	inp = (struct sctp_inpcb *)so->so_pcb;
6235 	/* pickup the assoc we are reading from */
6236 	if (inp == NULL) {
6237 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6238 		return (EINVAL);
6239 	}
6240 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6241 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6242 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6243 	    (controlp == NULL)) {
6244 		/* user does not want the sndrcv ctl */
6245 		filling_sinfo = 0;
6246 	}
6247 	if (psa) {
6248 		from = (struct sockaddr *)sockbuf;
6249 		fromlen = sizeof(sockbuf);
6250 		from->sa_len = 0;
6251 	} else {
6252 		from = NULL;
6253 		fromlen = 0;
6254 	}
6255 
6256 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6257 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6258 	if ((controlp) && (filling_sinfo)) {
6259 		/* copy back the sinfo in a CMSG format */
6260 		if (filling_sinfo)
6261 			*controlp = sctp_build_ctl_nchunk(inp,
6262 			    (struct sctp_sndrcvinfo *)&sinfo);
6263 		else
6264 			*controlp = NULL;
6265 	}
6266 	if (psa) {
6267 		/* copy back the address info */
6268 		if (from && from->sa_len) {
6269 			*psa = sodupsockaddr(from, M_NOWAIT);
6270 		} else {
6271 			*psa = NULL;
6272 		}
6273 	}
6274 	return (error);
6275 }
6276 
6277 
6278 
6279 
6280 
/*
 * Add a packed array of 'totaddr' sockaddrs to an association as remote
 * (confirmed) addresses, for the connectx path.  On any invalid address
 * or allocation failure the association is freed, *error is set (EINVAL
 * or ENOBUFS) and we stop.  Returns the number of addresses successfully
 * added.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast peers */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast peers */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown family is silently
			 * skipped, and since incr keeps its previous value
			 * (0 on the first iteration) sa may not advance --
			 * presumably the caller has pre-validated the
			 * families via sctp_connectx_helper_find(); verify.
			 */
			break;
		}
		/* step to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6356 
6357 struct sctp_tcb *
6358 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6359     int *totaddr, int *num_v4, int *num_v6, int *error,
6360     int limit, int *bad_addr)
6361 {
6362 	struct sockaddr *sa;
6363 	struct sctp_tcb *stcb = NULL;
6364 	size_t incr, at, i;
6365 
6366 	at = incr = 0;
6367 	sa = addr;
6368 
6369 	*error = *num_v6 = *num_v4 = 0;
6370 	/* account and validate addresses */
6371 	for (i = 0; i < (size_t)*totaddr; i++) {
6372 		switch (sa->sa_family) {
6373 #ifdef INET
6374 		case AF_INET:
6375 			(*num_v4) += 1;
6376 			incr = sizeof(struct sockaddr_in);
6377 			if (sa->sa_len != incr) {
6378 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6379 				*error = EINVAL;
6380 				*bad_addr = 1;
6381 				return (NULL);
6382 			}
6383 			break;
6384 #endif
6385 #ifdef INET6
6386 		case AF_INET6:
6387 			{
6388 				struct sockaddr_in6 *sin6;
6389 
6390 				sin6 = (struct sockaddr_in6 *)sa;
6391 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6392 					/* Must be non-mapped for connectx */
6393 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6394 					*error = EINVAL;
6395 					*bad_addr = 1;
6396 					return (NULL);
6397 				}
6398 				(*num_v6) += 1;
6399 				incr = sizeof(struct sockaddr_in6);
6400 				if (sa->sa_len != incr) {
6401 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6402 					*error = EINVAL;
6403 					*bad_addr = 1;
6404 					return (NULL);
6405 				}
6406 				break;
6407 			}
6408 #endif
6409 		default:
6410 			*totaddr = i;
6411 			/* we are done */
6412 			break;
6413 		}
6414 		if (i == (size_t)*totaddr) {
6415 			break;
6416 		}
6417 		SCTP_INP_INCR_REF(inp);
6418 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6419 		if (stcb != NULL) {
6420 			/* Already have or am bring up an association */
6421 			return (stcb);
6422 		} else {
6423 			SCTP_INP_DECR_REF(inp);
6424 		}
6425 		if ((at + incr) > (size_t)limit) {
6426 			*totaddr = i;
6427 			break;
6428 		}
6429 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6430 	}
6431 	return ((struct sctp_tcb *)NULL);
6432 }
6433 
6434 /*
6435  * sctp_bindx(ADD) for one address.
6436  * assumes all arguments are valid/checked by caller.
6437  */
/*
 * sctp_bindx(ADD) for one address: validate the sockaddr against the
 * endpoint's family/binding mode, then either perform the initial bind
 * (if still unbound) or add the address to the endpoint's address list.
 * Errors are reported through *error (EINVAL / EADDRINUSE / bind errno).
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* rewrite the v4-mapped v6 address as a plain v4 one */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* not yet bound at all: this becomes the initial bind */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		/*
		 * NOTE(review): addr_touse may still be a sockaddr_in6
		 * here; this cast presumably relies on sin_port and
		 * sin6_port occupying the same offset -- confirm.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* is some other endpoint already bound to this addr:port? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* free to use: add it to this endpoint's list */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6561 
6562 /*
6563  * sctp_bindx(DELETE) for one address.
6564  * assumes all arguments are valid/checked by caller.
6565  */
6566 void
6567 sctp_bindx_delete_address(struct sctp_inpcb *inp,
6568     struct sockaddr *sa, sctp_assoc_t assoc_id,
6569     uint32_t vrf_id, int *error)
6570 {
6571 	struct sockaddr *addr_touse;
6572 
6573 #ifdef INET6
6574 	struct sockaddr_in sin;
6575 
6576 #endif
6577 
6578 	/* see if we're bound all already! */
6579 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6580 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6581 		*error = EINVAL;
6582 		return;
6583 	}
6584 	addr_touse = sa;
6585 #if defined(INET6)
6586 	if (sa->sa_family == AF_INET6) {
6587 		struct sockaddr_in6 *sin6;
6588 
6589 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6590 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6591 			*error = EINVAL;
6592 			return;
6593 		}
6594 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6595 			/* can only bind v6 on PF_INET6 sockets */
6596 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6597 			*error = EINVAL;
6598 			return;
6599 		}
6600 		sin6 = (struct sockaddr_in6 *)addr_touse;
6601 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6602 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6603 			    SCTP_IPV6_V6ONLY(inp)) {
6604 				/* can't bind mapped-v4 on PF_INET sockets */
6605 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6606 				*error = EINVAL;
6607 				return;
6608 			}
6609 			in6_sin6_2_sin(&sin, sin6);
6610 			addr_touse = (struct sockaddr *)&sin;
6611 		}
6612 	}
6613 #endif
6614 #ifdef INET
6615 	if (sa->sa_family == AF_INET) {
6616 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6617 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6618 			*error = EINVAL;
6619 			return;
6620 		}
6621 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6622 		    SCTP_IPV6_V6ONLY(inp)) {
6623 			/* can't bind v4 on PF_INET sockets */
6624 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6625 			*error = EINVAL;
6626 			return;
6627 		}
6628 	}
6629 #endif
6630 	/*
6631 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6632 	 * below is ever changed we may need to lock before calling
6633 	 * association level binding.
6634 	 */
6635 	if (assoc_id == 0) {
6636 		/* delete the address */
6637 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6638 		    SCTP_DEL_IP_ADDRESS,
6639 		    vrf_id, NULL);
6640 	} else {
6641 		/*
6642 		 * FIX: decide whether we allow assoc based bindx
6643 		 */
6644 	}
6645 }
6646 
6647 /*
6648  * returns the valid local address count for an assoc, taking into account
6649  * all scoping rules
6650  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/* which families may this endpoint use? */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			/* dual-stack v6 socket may also use v4 */
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				/* skip addresses this assoc may not use */
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link-local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6784 
6785 #if defined(SCTP_LOCAL_TRACE_BUF)
6786 
/*
 * Append one entry to the circular in-kernel trace buffer.  A slot is
 * reserved lock-free: the CAS loop claims the current index and bumps it
 * (wrapping to 1 once SCTP_MAX_LOGGING_SIZE is reached), so concurrent
 * tracers never write the same slot.  The six uint32_t parameters are
 * stored verbatim; 'str' is unused here (SCTP_UNUSED).
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* reserve a slot: retry until the CAS on the shared index wins */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* a saved index at the limit means we wrapped: write slot 0 */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6812 
6813 #endif
6814 /* XXX: Remove the #ifdef after tunneling over IPv6 works also on FreeBSD. */
6815 #ifdef INET
6816 /* We will need to add support
6817  * to bind the ports and such here
6818  * so we can do UDP tunneling. In
6819  * the mean-time, we return error
6820  */
6821 #include <netinet/udp.h>
6822 #include <netinet/udp_var.h>
6823 #include <sys/proc.h>
6824 #ifdef INET6
6825 #include <netinet6/sctp6_var.h>
6826 #endif
6827 
6828 static void
6829 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6830 {
6831 	struct ip *iph;
6832 	struct mbuf *sp, *last;
6833 	struct udphdr *uhdr;
6834 	uint16_t port = 0;
6835 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6836 
6837 	/*
6838 	 * Split out the mbuf chain. Leave the IP header in m, place the
6839 	 * rest in the sp.
6840 	 */
6841 	if ((m->m_flags & M_PKTHDR) == 0) {
6842 		/* Can't handle one that is not a pkt hdr */
6843 		goto out;
6844 	}
6845 	/* pull the src port */
6846 	iph = mtod(m, struct ip *);
6847 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6848 
6849 	port = uhdr->uh_sport;
6850 	sp = m_split(m, off, M_DONTWAIT);
6851 	if (sp == NULL) {
6852 		/* Gak, drop packet, we can't do a split */
6853 		goto out;
6854 	}
6855 	if (sp->m_pkthdr.len < header_size) {
6856 		/* Gak, packet can't have an SCTP header in it - to small */
6857 		m_freem(sp);
6858 		goto out;
6859 	}
6860 	/* ok now pull up the UDP header and SCTP header together */
6861 	sp = m_pullup(sp, header_size);
6862 	if (sp == NULL) {
6863 		/* Gak pullup failed */
6864 		goto out;
6865 	}
6866 	/* trim out the UDP header */
6867 	m_adj(sp, sizeof(struct udphdr));
6868 
6869 	/* Now reconstruct the mbuf chain */
6870 	/* 1) find last one */
6871 	last = m;
6872 	while (last->m_next != NULL) {
6873 		last = last->m_next;
6874 	}
6875 	last->m_next = sp;
6876 	m->m_pkthdr.len += sp->m_pkthdr.len;
6877 	last = m;
6878 	while (last != NULL) {
6879 		last = last->m_next;
6880 	}
6881 	/* Now its ready for sctp_input or sctp6_input */
6882 	iph = mtod(m, struct ip *);
6883 	switch (iph->ip_v) {
6884 #ifdef INET
6885 	case IPVERSION:
6886 		{
6887 			uint16_t len;
6888 
6889 			/* its IPv4 */
6890 			len = SCTP_GET_IPV4_LENGTH(iph);
6891 			len -= sizeof(struct udphdr);
6892 			SCTP_GET_IPV4_LENGTH(iph) = len;
6893 			sctp_input_with_port(m, off, port);
6894 			break;
6895 		}
6896 #endif
6897 #ifdef INET6
6898 	case IPV6_VERSION >> 4:
6899 		{
6900 			/* its IPv6 - NOT supported */
6901 			goto out;
6902 			break;
6903 
6904 		}
6905 #endif
6906 	default:
6907 		{
6908 			m_freem(m);
6909 			break;
6910 		}
6911 	}
6912 	return;
6913 out:
6914 	m_freem(m);
6915 }
6916 
6917 void
6918 sctp_over_udp_stop(void)
6919 {
6920 	struct socket *sop;
6921 
6922 	/*
6923 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6924 	 * for writting!
6925 	 */
6926 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6927 		/* Nothing to do */
6928 		return;
6929 	}
6930 	sop = SCTP_BASE_INFO(udp_tun_socket);
6931 	soclose(sop);
6932 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6933 }
6934 
/*
 * Bring up UDP tunneling for SCTP: create a kernel UDP socket, install
 * sctp_recv_udp_tunneled_packet() as its tunneling input hook, and bind
 * it to the sysctl-configured port.  Returns 0 on success or an errno
 * (EINVAL if no port is configured, EALREADY if already running, or the
 * failure from socreate()/udp_set_kernel_tunneling()/sobind()).
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writting!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	/* publish the socket before the hook/bind so stop() can clean up */
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up we cant get the port */
exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_upd_tunneled_packet().
	 */
	return (0);
}
6988 
6989 #endif
6990