xref: /freebsd/sys/netinet/sctputil.c (revision 7750ad47a9a7dbc83f87158464170c8640723293)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_bsd_addr.h>
51 
52 
53 #ifndef KTR_SCTP
54 #define KTR_SCTP KTR_SUBSYS
55 #endif
56 
57 extern struct sctp_cc_functions sctp_cc_functions[];
58 extern struct sctp_ss_functions sctp_ss_functions[];
59 
60 void
61 sctp_sblog(struct sockbuf *sb,
62     struct sctp_tcb *stcb, int from, int incr)
63 {
64 	struct sctp_cwnd_log sctp_clog;
65 
66 	sctp_clog.x.sb.stcb = stcb;
67 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
68 	if (stcb)
69 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
70 	else
71 		sctp_clog.x.sb.stcb_sbcc = 0;
72 	sctp_clog.x.sb.incr = incr;
73 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
74 	    SCTP_LOG_EVENT_SB,
75 	    from,
76 	    sctp_clog.x.misc.log1,
77 	    sctp_clog.x.misc.log2,
78 	    sctp_clog.x.misc.log3,
79 	    sctp_clog.x.misc.log4);
80 }
81 
82 void
83 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
84 {
85 	struct sctp_cwnd_log sctp_clog;
86 
87 	sctp_clog.x.close.inp = (void *)inp;
88 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
89 	if (stcb) {
90 		sctp_clog.x.close.stcb = (void *)stcb;
91 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
92 	} else {
93 		sctp_clog.x.close.stcb = 0;
94 		sctp_clog.x.close.state = 0;
95 	}
96 	sctp_clog.x.close.loc = loc;
97 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
98 	    SCTP_LOG_EVENT_CLOSE,
99 	    0,
100 	    sctp_clog.x.misc.log1,
101 	    sctp_clog.x.misc.log2,
102 	    sctp_clog.x.misc.log3,
103 	    sctp_clog.x.misc.log4);
104 }
105 
106 
107 void
108 rto_logging(struct sctp_nets *net, int from)
109 {
110 	struct sctp_cwnd_log sctp_clog;
111 
112 	memset(&sctp_clog, 0, sizeof(sctp_clog));
113 	sctp_clog.x.rto.net = (void *)net;
114 	sctp_clog.x.rto.rtt = net->rtt / 1000;
115 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
116 	    SCTP_LOG_EVENT_RTT,
117 	    from,
118 	    sctp_clog.x.misc.log1,
119 	    sctp_clog.x.misc.log2,
120 	    sctp_clog.x.misc.log3,
121 	    sctp_clog.x.misc.log4);
122 }
123 
124 void
125 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
126 {
127 	struct sctp_cwnd_log sctp_clog;
128 
129 	sctp_clog.x.strlog.stcb = stcb;
130 	sctp_clog.x.strlog.n_tsn = tsn;
131 	sctp_clog.x.strlog.n_sseq = sseq;
132 	sctp_clog.x.strlog.e_tsn = 0;
133 	sctp_clog.x.strlog.e_sseq = 0;
134 	sctp_clog.x.strlog.strm = stream;
135 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
136 	    SCTP_LOG_EVENT_STRM,
137 	    from,
138 	    sctp_clog.x.misc.log1,
139 	    sctp_clog.x.misc.log2,
140 	    sctp_clog.x.misc.log3,
141 	    sctp_clog.x.misc.log4);
142 }
143 
144 void
145 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
146 {
147 	struct sctp_cwnd_log sctp_clog;
148 
149 	sctp_clog.x.nagle.stcb = (void *)stcb;
150 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
151 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
152 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
153 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
154 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
155 	    SCTP_LOG_EVENT_NAGLE,
156 	    action,
157 	    sctp_clog.x.misc.log1,
158 	    sctp_clog.x.misc.log2,
159 	    sctp_clog.x.misc.log3,
160 	    sctp_clog.x.misc.log4);
161 }
162 
163 void
164 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
165 {
166 	struct sctp_cwnd_log sctp_clog;
167 
168 	sctp_clog.x.sack.cumack = cumack;
169 	sctp_clog.x.sack.oldcumack = old_cumack;
170 	sctp_clog.x.sack.tsn = tsn;
171 	sctp_clog.x.sack.numGaps = gaps;
172 	sctp_clog.x.sack.numDups = dups;
173 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
174 	    SCTP_LOG_EVENT_SACK,
175 	    from,
176 	    sctp_clog.x.misc.log1,
177 	    sctp_clog.x.misc.log2,
178 	    sctp_clog.x.misc.log3,
179 	    sctp_clog.x.misc.log4);
180 }
181 
182 void
183 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
184 {
185 	struct sctp_cwnd_log sctp_clog;
186 
187 	memset(&sctp_clog, 0, sizeof(sctp_clog));
188 	sctp_clog.x.map.base = map;
189 	sctp_clog.x.map.cum = cum;
190 	sctp_clog.x.map.high = high;
191 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
192 	    SCTP_LOG_EVENT_MAP,
193 	    from,
194 	    sctp_clog.x.misc.log1,
195 	    sctp_clog.x.misc.log2,
196 	    sctp_clog.x.misc.log3,
197 	    sctp_clog.x.misc.log4);
198 }
199 
200 void
201 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
202     int from)
203 {
204 	struct sctp_cwnd_log sctp_clog;
205 
206 	memset(&sctp_clog, 0, sizeof(sctp_clog));
207 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
208 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
209 	sctp_clog.x.fr.tsn = tsn;
210 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
211 	    SCTP_LOG_EVENT_FR,
212 	    from,
213 	    sctp_clog.x.misc.log1,
214 	    sctp_clog.x.misc.log2,
215 	    sctp_clog.x.misc.log3,
216 	    sctp_clog.x.misc.log4);
217 }
218 
219 void
220 sctp_log_mb(struct mbuf *m, int from)
221 {
222 	struct sctp_cwnd_log sctp_clog;
223 
224 	sctp_clog.x.mb.mp = m;
225 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
226 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
227 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
228 	if (SCTP_BUF_IS_EXTENDED(m)) {
229 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
230 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
231 	} else {
232 		sctp_clog.x.mb.ext = 0;
233 		sctp_clog.x.mb.refcnt = 0;
234 	}
235 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
236 	    SCTP_LOG_EVENT_MBUF,
237 	    from,
238 	    sctp_clog.x.misc.log1,
239 	    sctp_clog.x.misc.log2,
240 	    sctp_clog.x.misc.log3,
241 	    sctp_clog.x.misc.log4);
242 }
243 
244 void
245 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
246     int from)
247 {
248 	struct sctp_cwnd_log sctp_clog;
249 
250 	if (control == NULL) {
251 		SCTP_PRINTF("Gak log of NULL?\n");
252 		return;
253 	}
254 	sctp_clog.x.strlog.stcb = control->stcb;
255 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
256 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
257 	sctp_clog.x.strlog.strm = control->sinfo_stream;
258 	if (poschk != NULL) {
259 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
260 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
261 	} else {
262 		sctp_clog.x.strlog.e_tsn = 0;
263 		sctp_clog.x.strlog.e_sseq = 0;
264 	}
265 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
266 	    SCTP_LOG_EVENT_STRM,
267 	    from,
268 	    sctp_clog.x.misc.log1,
269 	    sctp_clog.x.misc.log2,
270 	    sctp_clog.x.misc.log3,
271 	    sctp_clog.x.misc.log4);
272 }
273 
274 void
275 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
276 {
277 	struct sctp_cwnd_log sctp_clog;
278 
279 	sctp_clog.x.cwnd.net = net;
280 	if (stcb->asoc.send_queue_cnt > 255)
281 		sctp_clog.x.cwnd.cnt_in_send = 255;
282 	else
283 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
284 	if (stcb->asoc.stream_queue_cnt > 255)
285 		sctp_clog.x.cwnd.cnt_in_str = 255;
286 	else
287 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
288 
289 	if (net) {
290 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
291 		sctp_clog.x.cwnd.inflight = net->flight_size;
292 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
293 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
294 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
295 	}
296 	if (SCTP_CWNDLOG_PRESEND == from) {
297 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
298 	}
299 	sctp_clog.x.cwnd.cwnd_augment = augment;
300 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
301 	    SCTP_LOG_EVENT_CWND,
302 	    from,
303 	    sctp_clog.x.misc.log1,
304 	    sctp_clog.x.misc.log2,
305 	    sctp_clog.x.misc.log3,
306 	    sctp_clog.x.misc.log4);
307 }
308 
309 void
310 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
311 {
312 	struct sctp_cwnd_log sctp_clog;
313 
314 	memset(&sctp_clog, 0, sizeof(sctp_clog));
315 	if (inp) {
316 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
317 
318 	} else {
319 		sctp_clog.x.lock.sock = (void *)NULL;
320 	}
321 	sctp_clog.x.lock.inp = (void *)inp;
322 	if (stcb) {
323 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
324 	} else {
325 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
326 	}
327 	if (inp) {
328 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
329 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
330 	} else {
331 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
332 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
333 	}
334 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
335 	if (inp && (inp->sctp_socket)) {
336 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
337 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
338 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
339 	} else {
340 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
341 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
342 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
343 	}
344 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
345 	    SCTP_LOG_LOCK_EVENT,
346 	    from,
347 	    sctp_clog.x.misc.log1,
348 	    sctp_clog.x.misc.log2,
349 	    sctp_clog.x.misc.log3,
350 	    sctp_clog.x.misc.log4);
351 }
352 
353 void
354 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
355 {
356 	struct sctp_cwnd_log sctp_clog;
357 
358 	memset(&sctp_clog, 0, sizeof(sctp_clog));
359 	sctp_clog.x.cwnd.net = net;
360 	sctp_clog.x.cwnd.cwnd_new_value = error;
361 	sctp_clog.x.cwnd.inflight = net->flight_size;
362 	sctp_clog.x.cwnd.cwnd_augment = burst;
363 	if (stcb->asoc.send_queue_cnt > 255)
364 		sctp_clog.x.cwnd.cnt_in_send = 255;
365 	else
366 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
367 	if (stcb->asoc.stream_queue_cnt > 255)
368 		sctp_clog.x.cwnd.cnt_in_str = 255;
369 	else
370 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
371 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
372 	    SCTP_LOG_EVENT_MAXBURST,
373 	    from,
374 	    sctp_clog.x.misc.log1,
375 	    sctp_clog.x.misc.log2,
376 	    sctp_clog.x.misc.log3,
377 	    sctp_clog.x.misc.log4);
378 }
379 
380 void
381 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
382 {
383 	struct sctp_cwnd_log sctp_clog;
384 
385 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
386 	sctp_clog.x.rwnd.send_size = snd_size;
387 	sctp_clog.x.rwnd.overhead = overhead;
388 	sctp_clog.x.rwnd.new_rwnd = 0;
389 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390 	    SCTP_LOG_EVENT_RWND,
391 	    from,
392 	    sctp_clog.x.misc.log1,
393 	    sctp_clog.x.misc.log2,
394 	    sctp_clog.x.misc.log3,
395 	    sctp_clog.x.misc.log4);
396 }
397 
398 void
399 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
400 {
401 	struct sctp_cwnd_log sctp_clog;
402 
403 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
404 	sctp_clog.x.rwnd.send_size = flight_size;
405 	sctp_clog.x.rwnd.overhead = overhead;
406 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
407 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408 	    SCTP_LOG_EVENT_RWND,
409 	    from,
410 	    sctp_clog.x.misc.log1,
411 	    sctp_clog.x.misc.log2,
412 	    sctp_clog.x.misc.log3,
413 	    sctp_clog.x.misc.log4);
414 }
415 
416 void
417 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
418 {
419 	struct sctp_cwnd_log sctp_clog;
420 
421 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
422 	sctp_clog.x.mbcnt.size_change = book;
423 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
424 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
425 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426 	    SCTP_LOG_EVENT_MBCNT,
427 	    from,
428 	    sctp_clog.x.misc.log1,
429 	    sctp_clog.x.misc.log2,
430 	    sctp_clog.x.misc.log3,
431 	    sctp_clog.x.misc.log4);
432 }
433 
/*
 * Emit four caller-supplied 32-bit values to the KTR trace as a
 * generic "misc" event; 'from' tags the call site.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
442 
443 void
444 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
445 {
446 	struct sctp_cwnd_log sctp_clog;
447 
448 	sctp_clog.x.wake.stcb = (void *)stcb;
449 	sctp_clog.x.wake.wake_cnt = wake_cnt;
450 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
451 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
452 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
453 
454 	if (stcb->asoc.stream_queue_cnt < 0xff)
455 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
456 	else
457 		sctp_clog.x.wake.stream_qcnt = 0xff;
458 
459 	if (stcb->asoc.chunks_on_out_queue < 0xff)
460 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
461 	else
462 		sctp_clog.x.wake.chunks_on_oque = 0xff;
463 
464 	sctp_clog.x.wake.sctpflags = 0;
465 	/* set in the defered mode stuff */
466 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
467 		sctp_clog.x.wake.sctpflags |= 1;
468 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
469 		sctp_clog.x.wake.sctpflags |= 2;
470 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
471 		sctp_clog.x.wake.sctpflags |= 4;
472 	/* what about the sb */
473 	if (stcb->sctp_socket) {
474 		struct socket *so = stcb->sctp_socket;
475 
476 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
477 	} else {
478 		sctp_clog.x.wake.sbflags = 0xff;
479 	}
480 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
481 	    SCTP_LOG_EVENT_WAKE,
482 	    from,
483 	    sctp_clog.x.misc.log1,
484 	    sctp_clog.x.misc.log2,
485 	    sctp_clog.x.misc.log3,
486 	    sctp_clog.x.misc.log4);
487 }
488 
489 void
490 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
491 {
492 	struct sctp_cwnd_log sctp_clog;
493 
494 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
495 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
496 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
497 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
498 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
499 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
500 	sctp_clog.x.blk.sndlen = sendlen;
501 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
502 	    SCTP_LOG_EVENT_BLOCK,
503 	    from,
504 	    sctp_clog.x.misc.log1,
505 	    sctp_clog.x.misc.log2,
506 	    sctp_clog.x.misc.log3,
507 	    sctp_clog.x.misc.log4);
508 }
509 
/*
 * Stub handler for retrieving the stat log via a socket option.
 * Always succeeds without copying anything; the trace data is
 * expected to be extracted with ktrdump instead.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
516 
517 #ifdef SCTP_AUDITING_ENABLED
/* Circular trace of (event, detail) byte pairs written by the audit code. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next slot to write in sctp_audit_data; wraps at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
520 
521 static
522 void
523 sctp_print_audit_report(void)
524 {
525 	int i;
526 	int cnt;
527 
528 	cnt = 0;
529 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
530 		if ((sctp_audit_data[i][0] == 0xe0) &&
531 		    (sctp_audit_data[i][1] == 0x01)) {
532 			cnt = 0;
533 			SCTP_PRINTF("\n");
534 		} else if (sctp_audit_data[i][0] == 0xf0) {
535 			cnt = 0;
536 			SCTP_PRINTF("\n");
537 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
538 		    (sctp_audit_data[i][1] == 0x01)) {
539 			SCTP_PRINTF("\n");
540 			cnt = 0;
541 		}
542 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
543 		    (uint32_t) sctp_audit_data[i][1]);
544 		cnt++;
545 		if ((cnt % 14) == 0)
546 			SCTP_PRINTF("\n");
547 	}
548 	for (i = 0; i < sctp_audit_indx; i++) {
549 		if ((sctp_audit_data[i][0] == 0xe0) &&
550 		    (sctp_audit_data[i][1] == 0x01)) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if (sctp_audit_data[i][0] == 0xf0) {
554 			cnt = 0;
555 			SCTP_PRINTF("\n");
556 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
557 		    (sctp_audit_data[i][1] == 0x01)) {
558 			SCTP_PRINTF("\n");
559 			cnt = 0;
560 		}
561 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
562 		    (uint32_t) sctp_audit_data[i][1]);
563 		cnt++;
564 		if ((cnt % 14) == 0)
565 			SCTP_PRINTF("\n");
566 	}
567 	SCTP_PRINTF("\n");
568 }
569 
/*
 * Consistency audit of an association's retransmission and flight-size
 * bookkeeping.  Writes progress markers into the circular audit buffer,
 * compares the cached counters against the actual sent queue, corrects
 * any counter found to disagree, and prints the audit trace when a
 * discrepancy was detected.  'from' tags the call site in the trace.
 * NOTE(review): the 'net' parameter is unused in this function body.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA marks the start of an audit pass; low byte records the caller. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: audit aborted, no endpoint. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: audit aborted, no association. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the cached retransmit count before verification. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Walk the sent queue tallying chunks marked for resend and, for
	 * chunks still considered in flight, their booked bytes and count.
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit count mismatch; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		/* 0xA2: record the corrected retransmit count. */
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total flight bytes mismatch; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: in-flight chunk count mismatch; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: the per-destination flight sizes must sum to the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight sizes disagree with the total. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			/* Recompute this destination's flight from the sent queue. */
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
699 
700 void
701 sctp_audit_log(uint8_t ev, uint8_t fd)
702 {
703 
704 	sctp_audit_data[sctp_audit_indx][0] = ev;
705 	sctp_audit_data[sctp_audit_indx][1] = fd;
706 	sctp_audit_indx++;
707 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
708 		sctp_audit_indx = 0;
709 	}
710 }
711 
712 #endif
713 
714 /*
715  * sctp_stop_timers_for_shutdown() should be called
716  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
717  * state to make sure that all timers are stopped.
718  */
719 void
720 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
721 {
722 	struct sctp_association *asoc;
723 	struct sctp_nets *net;
724 
725 	asoc = &stcb->asoc;
726 
727 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
728 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
729 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
730 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
731 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
732 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
733 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
734 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
735 	}
736 }
737 
738 /*
739  * a list of sizes based on typical mtu's, used only if next hop size not
740  * returned.
741  */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest table MTU strictly smaller than val.  Values at
 * or below the smallest entry, and values with no larger table entry
 * behind them, are handled by the table boundaries; a val at or below
 * the first entry is returned unchanged.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t idx;

	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	idx = 1;
	while ((idx < (sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]))) &&
	    (val > sctp_mtu_sizes[idx])) {
		idx++;
	}
	return (sctp_mtu_sizes[idx - 1]);
}

/*
 * Return the smallest table MTU strictly larger than val, or val
 * itself when no table entry is larger.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	uint32_t idx;

	for (idx = 0; idx < (sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0])); idx++) {
		if (sctp_mtu_sizes[idx] > val) {
			return (sctp_mtu_sizes[idx]);
		}
	}
	return (val);
}
800 
/*
 * Refill the endpoint's random store by hashing (via SCTP_HMAC through
 * sctp_hmac()) the endpoint's random numbers keyed by an incrementing
 * counter, and reset the consumption index to the start of the store.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but that's ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
819 
820 uint32_t
821 sctp_select_initial_TSN(struct sctp_pcb *inp)
822 {
823 	/*
824 	 * A true implementation should use random selection process to get
825 	 * the initial stream sequence number, using RFC1750 as a good
826 	 * guideline
827 	 */
828 	uint32_t x, *xp;
829 	uint8_t *p;
830 	int store_at, new_store;
831 
832 	if (inp->initial_sequence_debug != 0) {
833 		uint32_t ret;
834 
835 		ret = inp->initial_sequence_debug;
836 		inp->initial_sequence_debug++;
837 		return (ret);
838 	}
839 retry:
840 	store_at = inp->store_at;
841 	new_store = store_at + sizeof(uint32_t);
842 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
843 		new_store = 0;
844 	}
845 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
846 		goto retry;
847 	}
848 	if (new_store == 0) {
849 		/* Refill the random store */
850 		sctp_fill_random_store(inp);
851 	}
852 	p = &inp->random_store[store_at];
853 	xp = (uint32_t *) p;
854 	x = *xp;
855 	return (x);
856 }
857 
858 uint32_t
859 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
860 {
861 	uint32_t x;
862 	struct timeval now;
863 
864 	if (check) {
865 		(void)SCTP_GETTIME_TIMEVAL(&now);
866 	}
867 	for (;;) {
868 		x = sctp_select_initial_TSN(&inp->sctp_ep);
869 		if (x == 0) {
870 			/* we never use 0 */
871 			continue;
872 		}
873 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
874 			break;
875 		}
876 	}
877 	return (x);
878 }
879 
880 int
881 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
882     uint32_t override_tag, uint32_t vrf_id)
883 {
884 	struct sctp_association *asoc;
885 
886 	/*
887 	 * Anything set to zero is taken care of by the allocation routine's
888 	 * bzero
889 	 */
890 
891 	/*
892 	 * Up front select what scoping to apply on addresses I tell my peer
893 	 * Not sure what to do with these right now, we will need to come up
894 	 * with a way to set them. We may need to pass them through from the
895 	 * caller in the sctp_aloc_assoc() function.
896 	 */
897 	int i;
898 
899 	asoc = &stcb->asoc;
900 	/* init all variables to a known value. */
901 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
902 	asoc->max_burst = m->sctp_ep.max_burst;
903 	asoc->fr_max_burst = m->sctp_ep.fr_max_burst;
904 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
905 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
906 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
907 	asoc->ecn_allowed = m->sctp_ecn_enable;
908 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
909 	asoc->sctp_cmt_pf = (uint8_t) 0;
910 	asoc->sctp_frag_point = m->sctp_frag_point;
911 	asoc->sctp_features = m->sctp_features;
912 	asoc->default_dscp = m->sctp_ep.default_dscp;
913 #ifdef INET6
914 	if (m->sctp_ep.default_flowlabel) {
915 		asoc->default_flowlabel = m->sctp_ep.default_flowlabel;
916 	} else {
917 		if (m->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
918 			asoc->default_flowlabel = sctp_select_initial_TSN(&m->sctp_ep);
919 			asoc->default_flowlabel &= 0x000fffff;
920 			asoc->default_flowlabel |= 0x80000000;
921 		} else {
922 			asoc->default_flowlabel = 0;
923 		}
924 	}
925 #endif
926 	asoc->sb_send_resv = 0;
927 	if (override_tag) {
928 		asoc->my_vtag = override_tag;
929 	} else {
930 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
931 	}
932 	/* Get the nonce tags */
933 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
934 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
935 	asoc->vrf_id = vrf_id;
936 
937 #ifdef SCTP_ASOCLOG_OF_TSNS
938 	asoc->tsn_in_at = 0;
939 	asoc->tsn_out_at = 0;
940 	asoc->tsn_in_wrapped = 0;
941 	asoc->tsn_out_wrapped = 0;
942 	asoc->cumack_log_at = 0;
943 	asoc->cumack_log_atsnt = 0;
944 #endif
945 #ifdef SCTP_FS_SPEC_LOG
946 	asoc->fs_index = 0;
947 #endif
948 	asoc->refcnt = 0;
949 	asoc->assoc_up_sent = 0;
950 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
951 	    sctp_select_initial_TSN(&m->sctp_ep);
952 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimistic here */
954 	asoc->peer_supports_pktdrop = 1;
955 	asoc->peer_supports_nat = 0;
956 	asoc->sent_queue_retran_cnt = 0;
957 
958 	/* for CMT */
959 	asoc->last_net_cmt_send_started = NULL;
960 
961 	/* This will need to be adjusted */
962 	asoc->last_acked_seq = asoc->init_seq_number - 1;
963 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
964 	asoc->asconf_seq_in = asoc->last_acked_seq;
965 
966 	/* here we are different, we hold the next one we expect */
967 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
968 
969 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
970 	asoc->initial_rto = m->sctp_ep.initial_rto;
971 
972 	asoc->max_init_times = m->sctp_ep.max_init_times;
973 	asoc->max_send_times = m->sctp_ep.max_send_times;
974 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
975 	asoc->def_net_pf_threshold = m->sctp_ep.def_net_pf_threshold;
976 	asoc->free_chunk_cnt = 0;
977 
978 	asoc->iam_blocking = 0;
979 	asoc->context = m->sctp_context;
980 	asoc->local_strreset_support = m->local_strreset_support;
981 	asoc->def_send = m->def_send;
982 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
983 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
984 	asoc->pr_sctp_cnt = 0;
985 	asoc->total_output_queue_size = 0;
986 
987 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
988 		struct in6pcb *inp6;
989 
990 		/* Its a V6 socket */
991 		inp6 = (struct in6pcb *)m;
992 		asoc->ipv6_addr_legal = 1;
993 		/* Now look at the binding flag to see if V4 will be legal */
994 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
995 			asoc->ipv4_addr_legal = 1;
996 		} else {
997 			/* V4 addresses are NOT legal on the association */
998 			asoc->ipv4_addr_legal = 0;
999 		}
1000 	} else {
1001 		/* Its a V4 socket, no - V6 */
1002 		asoc->ipv4_addr_legal = 1;
1003 		asoc->ipv6_addr_legal = 0;
1004 	}
1005 
1006 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1007 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1008 
1009 	asoc->smallest_mtu = m->sctp_frag_point;
1010 	asoc->minrto = m->sctp_ep.sctp_minrto;
1011 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1012 
1013 	asoc->locked_on_sending = NULL;
1014 	asoc->stream_locked_on = 0;
1015 	asoc->ecn_echo_cnt_onq = 0;
1016 	asoc->stream_locked = 0;
1017 
1018 	asoc->send_sack = 1;
1019 
1020 	LIST_INIT(&asoc->sctp_restricted_addrs);
1021 
1022 	TAILQ_INIT(&asoc->nets);
1023 	TAILQ_INIT(&asoc->pending_reply_queue);
1024 	TAILQ_INIT(&asoc->asconf_ack_sent);
1025 	/* Setup to fill the hb random cache at first HB */
1026 	asoc->hb_random_idx = 4;
1027 
1028 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1029 
1030 	stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
1031 	stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
1032 
1033 	stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
1034 	stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
1035 
1036 	/*
1037 	 * Now the stream parameters, here we allocate space for all streams
1038 	 * that we request by default.
1039 	 */
1040 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1041 	    m->sctp_ep.pre_open_stream_count;
1042 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1043 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1044 	    SCTP_M_STRMO);
1045 	if (asoc->strmout == NULL) {
1046 		/* big trouble no memory */
1047 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1048 		return (ENOMEM);
1049 	}
1050 	for (i = 0; i < asoc->streamoutcnt; i++) {
1051 		/*
1052 		 * inbound side must be set to 0xffff, also NOTE when we get
1053 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1054 		 * count (streamoutcnt) but first check if we sent to any of
1055 		 * the upper streams that were dropped (if some were). Those
1056 		 * that were dropped must be notified to the upper layer as
1057 		 * failed to send.
1058 		 */
1059 		asoc->strmout[i].next_sequence_sent = 0x0;
1060 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1061 		asoc->strmout[i].stream_no = i;
1062 		asoc->strmout[i].last_msg_incomplete = 0;
1063 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1064 	}
1065 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1066 
1067 	/* Now the mapping array */
1068 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1069 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1070 	    SCTP_M_MAP);
1071 	if (asoc->mapping_array == NULL) {
1072 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1073 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1074 		return (ENOMEM);
1075 	}
1076 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1077 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1078 	    SCTP_M_MAP);
1079 	if (asoc->nr_mapping_array == NULL) {
1080 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1081 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1082 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1083 		return (ENOMEM);
1084 	}
1085 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1086 
1087 	/* Now the init of the other outqueues */
1088 	TAILQ_INIT(&asoc->free_chunks);
1089 	TAILQ_INIT(&asoc->control_send_queue);
1090 	TAILQ_INIT(&asoc->asconf_send_queue);
1091 	TAILQ_INIT(&asoc->send_queue);
1092 	TAILQ_INIT(&asoc->sent_queue);
1093 	TAILQ_INIT(&asoc->reasmqueue);
1094 	TAILQ_INIT(&asoc->resetHead);
1095 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1096 	TAILQ_INIT(&asoc->asconf_queue);
1097 	/* authentication fields */
1098 	asoc->authinfo.random = NULL;
1099 	asoc->authinfo.active_keyid = 0;
1100 	asoc->authinfo.assoc_key = NULL;
1101 	asoc->authinfo.assoc_keyid = 0;
1102 	asoc->authinfo.recv_key = NULL;
1103 	asoc->authinfo.recv_keyid = 0;
1104 	LIST_INIT(&asoc->shared_keys);
1105 	asoc->marked_retrans = 0;
1106 	asoc->port = m->sctp_ep.port;
1107 	asoc->timoinit = 0;
1108 	asoc->timodata = 0;
1109 	asoc->timosack = 0;
1110 	asoc->timoshutdown = 0;
1111 	asoc->timoheartbeat = 0;
1112 	asoc->timocookie = 0;
1113 	asoc->timoshutdownack = 0;
1114 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1115 	asoc->discontinuity_time = asoc->start_time;
1116 	/*
1117 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1118 	 * freed later when the association is freed.
1119 	 */
1120 	return (0);
1121 }
1122 
1123 void
1124 sctp_print_mapping_array(struct sctp_association *asoc)
1125 {
1126 	unsigned int i, limit;
1127 
1128 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1129 	    asoc->mapping_array_size,
1130 	    asoc->mapping_array_base_tsn,
1131 	    asoc->cumulative_tsn,
1132 	    asoc->highest_tsn_inside_map,
1133 	    asoc->highest_tsn_inside_nr_map);
1134 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1135 		if (asoc->mapping_array[limit - 1] != 0) {
1136 			break;
1137 		}
1138 	}
1139 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1140 	for (i = 0; i < limit; i++) {
1141 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1142 	}
1143 	if (limit % 16)
1144 		SCTP_PRINTF("\n");
1145 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1146 		if (asoc->nr_mapping_array[limit - 1]) {
1147 			break;
1148 		}
1149 	}
1150 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1151 	for (i = 0; i < limit; i++) {
1152 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1153 	}
1154 	if (limit % 16)
1155 		SCTP_PRINTF("\n");
1156 }
1157 
1158 int
1159 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1160 {
1161 	/* mapping array needs to grow */
1162 	uint8_t *new_array1, *new_array2;
1163 	uint32_t new_size;
1164 
1165 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1166 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1167 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1168 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1169 		/* can't get more, forget it */
1170 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1171 		if (new_array1) {
1172 			SCTP_FREE(new_array1, SCTP_M_MAP);
1173 		}
1174 		if (new_array2) {
1175 			SCTP_FREE(new_array2, SCTP_M_MAP);
1176 		}
1177 		return (-1);
1178 	}
1179 	memset(new_array1, 0, new_size);
1180 	memset(new_array2, 0, new_size);
1181 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1182 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1183 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1184 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1185 	asoc->mapping_array = new_array1;
1186 	asoc->nr_mapping_array = new_array2;
1187 	asoc->mapping_array_size = new_size;
1188 	return (0);
1189 }
1190 
1191 
/*
 * Core of the association iterator: walk endpoints (inps) and, for each
 * matching endpoint, the associations (stcbs) hanging off it, invoking
 * the caller-supplied callbacks (function_inp, function_assoc,
 * function_inp_end, function_atend).  Runs with the INP_INFO read lock
 * and the ITERATOR lock held; periodically drops all locks to let other
 * threads make progress.  Frees 'it' when the walk completes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* Trade the setup-time reference for a read lock. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		/* Common exit: drop locks, run the at-end callback, free it. */
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	/* On re-entry via the goto below, the inp is not yet locked. */
	if (first_in) {
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* Advance to the next inp before releasing the current one. */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		/* Per-endpoint callback runs once per endpoint. */
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Callback asked to skip this inp, or it has no assocs. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold a refcount on the stcb and a ref on the inp
			 * so neither disappears while every lock is
			 * dropped and reacquired.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/*
			 * While unlocked, someone may have posted a stop
			 * request in sctp_it_ctl.iterator_flags.
			 */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-lock and drop the temporary references. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1339 
/*
 * Drain the global iterator work queue: pop each queued iterator and run
 * it via sctp_iterator_work() in its owning vnet.  The WQ lock is
 * dropped around each unit of work and retaken afterwards;
 * iterator_running/cur_it publish progress to other threads.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	/*
	 * NOTE(review): 'nit' is captured before the WQ lock is dropped
	 * below; if the queue is modified while unlocked, the cached next
	 * pointer could be stale -- confirm queue-insertion rules for
	 * iteratorhead.
	 */
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		/* sctp_iterator_work() frees 'it' when finished. */
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1363 
1364 
1365 static void
1366 sctp_handle_addr_wq(void)
1367 {
1368 	/* deal with the ADDR wq from the rtsock calls */
1369 	struct sctp_laddr *wi, *nwi;
1370 	struct sctp_asconf_iterator *asc;
1371 
1372 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1373 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1374 	if (asc == NULL) {
1375 		/* Try later, no memory */
1376 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1377 		    (struct sctp_inpcb *)NULL,
1378 		    (struct sctp_tcb *)NULL,
1379 		    (struct sctp_nets *)NULL);
1380 		return;
1381 	}
1382 	LIST_INIT(&asc->list_of_work);
1383 	asc->cnt = 0;
1384 
1385 	SCTP_WQ_ADDR_LOCK();
1386 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1387 		LIST_REMOVE(wi, sctp_nxt_addr);
1388 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1389 		asc->cnt++;
1390 	}
1391 	SCTP_WQ_ADDR_UNLOCK();
1392 
1393 	if (asc->cnt == 0) {
1394 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1395 	} else {
1396 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1397 		    sctp_asconf_iterator_stcb,
1398 		    NULL,	/* No ep end for boundall */
1399 		    SCTP_PCB_FLAGS_BOUNDALL,
1400 		    SCTP_PCB_ANY_FEATURES,
1401 		    SCTP_ASOC_ANY_STATE,
1402 		    (void *)asc, 0,
1403 		    sctp_asconf_iterator_end, NULL, 0);
1404 	}
1405 }
1406 
/*
 * Central callout handler for every SCTP timer type.  't' is the
 * struct sctp_timer embedded in the owning object; its ep/tcb/net
 * fields identify the endpoint, association and destination (any of
 * which may be NULL depending on the timer type).  The function
 * validates the timer, takes the needed references and locks,
 * dispatches on tmr->type, and releases everything on the way out.
 * tmr->stopped_from is updated at each stage (appears to be a
 * debugging breadcrumb recording how far the handler progressed).
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Every timer type except ADDR_WQ requires an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/*
	 * Save the type now: the ASOCKILL/INPKILL cases below may free
	 * the objects and NULL out stcb/inp before the final debug print.
	 */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * A socket-less inp only services the timer types listed
		 * here; anything else is abandoned.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association while we validate it. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while this callout was in flight. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* Hold the TCB lock; the refcount taken above can go. */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* Rearm only when HBs are not disabled on this path. */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Shutdown took too long: abort the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	/* Use the saved 'type': stcb/inp may already be freed here. */
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1846 
1847 void
1848 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1849     struct sctp_nets *net)
1850 {
1851 	uint32_t to_ticks;
1852 	struct sctp_timer *tmr;
1853 
1854 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1855 		return;
1856 
1857 	tmr = NULL;
1858 	if (stcb) {
1859 		SCTP_TCB_LOCK_ASSERT(stcb);
1860 	}
1861 	switch (t_type) {
1862 	case SCTP_TIMER_TYPE_ZERO_COPY:
1863 		tmr = &inp->sctp_ep.zero_copy_timer;
1864 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1865 		break;
1866 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1867 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1868 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1869 		break;
1870 	case SCTP_TIMER_TYPE_ADDR_WQ:
1871 		/* Only 1 tick away :-) */
1872 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1873 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1874 		break;
1875 	case SCTP_TIMER_TYPE_SEND:
1876 		/* Here we use the RTO timer */
1877 		{
1878 			int rto_val;
1879 
1880 			if ((stcb == NULL) || (net == NULL)) {
1881 				return;
1882 			}
1883 			tmr = &net->rxt_timer;
1884 			if (net->RTO == 0) {
1885 				rto_val = stcb->asoc.initial_rto;
1886 			} else {
1887 				rto_val = net->RTO;
1888 			}
1889 			to_ticks = MSEC_TO_TICKS(rto_val);
1890 		}
1891 		break;
1892 	case SCTP_TIMER_TYPE_INIT:
1893 		/*
1894 		 * Here we use the INIT timer default usually about 1
1895 		 * minute.
1896 		 */
1897 		if ((stcb == NULL) || (net == NULL)) {
1898 			return;
1899 		}
1900 		tmr = &net->rxt_timer;
1901 		if (net->RTO == 0) {
1902 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1903 		} else {
1904 			to_ticks = MSEC_TO_TICKS(net->RTO);
1905 		}
1906 		break;
1907 	case SCTP_TIMER_TYPE_RECV:
1908 		/*
1909 		 * Here we use the Delayed-Ack timer value from the inp
1910 		 * ususually about 200ms.
1911 		 */
1912 		if (stcb == NULL) {
1913 			return;
1914 		}
1915 		tmr = &stcb->asoc.dack_timer;
1916 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1917 		break;
1918 	case SCTP_TIMER_TYPE_SHUTDOWN:
1919 		/* Here we use the RTO of the destination. */
1920 		if ((stcb == NULL) || (net == NULL)) {
1921 			return;
1922 		}
1923 		if (net->RTO == 0) {
1924 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1925 		} else {
1926 			to_ticks = MSEC_TO_TICKS(net->RTO);
1927 		}
1928 		tmr = &net->rxt_timer;
1929 		break;
1930 	case SCTP_TIMER_TYPE_HEARTBEAT:
1931 		/*
1932 		 * the net is used here so that we can add in the RTO. Even
1933 		 * though we use a different timer. We also add the HB timer
1934 		 * PLUS a random jitter.
1935 		 */
1936 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1937 			return;
1938 		} else {
1939 			uint32_t rndval;
1940 			uint32_t jitter;
1941 
1942 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1943 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1944 				return;
1945 			}
1946 			if (net->RTO == 0) {
1947 				to_ticks = stcb->asoc.initial_rto;
1948 			} else {
1949 				to_ticks = net->RTO;
1950 			}
1951 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1952 			jitter = rndval % to_ticks;
1953 			if (jitter >= (to_ticks >> 1)) {
1954 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1955 			} else {
1956 				to_ticks = to_ticks - jitter;
1957 			}
1958 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1959 			    !(net->dest_state & SCTP_ADDR_PF)) {
1960 				to_ticks += net->heart_beat_delay;
1961 			}
1962 			/*
1963 			 * Now we must convert the to_ticks that are now in
1964 			 * ms to ticks.
1965 			 */
1966 			to_ticks = MSEC_TO_TICKS(to_ticks);
1967 			tmr = &net->hb_timer;
1968 		}
1969 		break;
1970 	case SCTP_TIMER_TYPE_COOKIE:
1971 		/*
1972 		 * Here we can use the RTO timer from the network since one
1973 		 * RTT was compelete. If a retran happened then we will be
1974 		 * using the RTO initial value.
1975 		 */
1976 		if ((stcb == NULL) || (net == NULL)) {
1977 			return;
1978 		}
1979 		if (net->RTO == 0) {
1980 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1981 		} else {
1982 			to_ticks = MSEC_TO_TICKS(net->RTO);
1983 		}
1984 		tmr = &net->rxt_timer;
1985 		break;
1986 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1987 		/*
1988 		 * nothing needed but the endpoint here ususually about 60
1989 		 * minutes.
1990 		 */
1991 		if (inp == NULL) {
1992 			return;
1993 		}
1994 		tmr = &inp->sctp_ep.signature_change;
1995 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1996 		break;
1997 	case SCTP_TIMER_TYPE_ASOCKILL:
1998 		if (stcb == NULL) {
1999 			return;
2000 		}
2001 		tmr = &stcb->asoc.strreset_timer;
2002 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2003 		break;
2004 	case SCTP_TIMER_TYPE_INPKILL:
2005 		/*
2006 		 * The inp is setup to die. We re-use the signature_chage
2007 		 * timer since that has stopped and we are in the GONE
2008 		 * state.
2009 		 */
2010 		if (inp == NULL) {
2011 			return;
2012 		}
2013 		tmr = &inp->sctp_ep.signature_change;
2014 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2015 		break;
2016 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2017 		/*
2018 		 * Here we use the value found in the EP for PMTU ususually
2019 		 * about 10 minutes.
2020 		 */
2021 		if ((stcb == NULL) || (inp == NULL)) {
2022 			return;
2023 		}
2024 		if (net == NULL) {
2025 			return;
2026 		}
2027 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2028 			return;
2029 		}
2030 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2031 		tmr = &net->pmtu_timer;
2032 		break;
2033 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2034 		/* Here we use the RTO of the destination */
2035 		if ((stcb == NULL) || (net == NULL)) {
2036 			return;
2037 		}
2038 		if (net->RTO == 0) {
2039 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2040 		} else {
2041 			to_ticks = MSEC_TO_TICKS(net->RTO);
2042 		}
2043 		tmr = &net->rxt_timer;
2044 		break;
2045 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2046 		/*
2047 		 * Here we use the endpoints shutdown guard timer usually
2048 		 * about 3 minutes.
2049 		 */
2050 		if ((inp == NULL) || (stcb == NULL)) {
2051 			return;
2052 		}
2053 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2054 		tmr = &stcb->asoc.shut_guard_timer;
2055 		break;
2056 	case SCTP_TIMER_TYPE_STRRESET:
2057 		/*
2058 		 * Here the timer comes from the stcb but its value is from
2059 		 * the net's RTO.
2060 		 */
2061 		if ((stcb == NULL) || (net == NULL)) {
2062 			return;
2063 		}
2064 		if (net->RTO == 0) {
2065 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2066 		} else {
2067 			to_ticks = MSEC_TO_TICKS(net->RTO);
2068 		}
2069 		tmr = &stcb->asoc.strreset_timer;
2070 		break;
2071 	case SCTP_TIMER_TYPE_ASCONF:
2072 		/*
2073 		 * Here the timer comes from the stcb but its value is from
2074 		 * the net's RTO.
2075 		 */
2076 		if ((stcb == NULL) || (net == NULL)) {
2077 			return;
2078 		}
2079 		if (net->RTO == 0) {
2080 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2081 		} else {
2082 			to_ticks = MSEC_TO_TICKS(net->RTO);
2083 		}
2084 		tmr = &stcb->asoc.asconf_timer;
2085 		break;
2086 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2087 		if ((stcb == NULL) || (net != NULL)) {
2088 			return;
2089 		}
2090 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2091 		tmr = &stcb->asoc.delete_prim_timer;
2092 		break;
2093 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2094 		if (stcb == NULL) {
2095 			return;
2096 		}
2097 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2098 			/*
2099 			 * Really an error since stcb is NOT set to
2100 			 * autoclose
2101 			 */
2102 			return;
2103 		}
2104 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2105 		tmr = &stcb->asoc.autoclose_timer;
2106 		break;
2107 	default:
2108 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2109 		    __FUNCTION__, t_type);
2110 		return;
2111 		break;
2112 	}
2113 	if ((to_ticks <= 0) || (tmr == NULL)) {
2114 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2115 		    __FUNCTION__, t_type, to_ticks, tmr);
2116 		return;
2117 	}
2118 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2119 		/*
2120 		 * we do NOT allow you to have it already running. if it is
2121 		 * we leave the current one up unchanged
2122 		 */
2123 		return;
2124 	}
2125 	/* At this point we can proceed */
2126 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2127 		stcb->asoc.num_send_timers_up++;
2128 	}
2129 	tmr->stopped_from = 0;
2130 	tmr->type = t_type;
2131 	tmr->ep = (void *)inp;
2132 	tmr->tcb = (void *)stcb;
2133 	tmr->net = (void *)net;
2134 	tmr->self = (void *)tmr;
2135 	tmr->vnet = (void *)curvnet;
2136 	tmr->ticks = sctp_get_tick_count();
2137 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2138 	return;
2139 }
2140 
/*
 * Stop (cancel) the timer of type t_type for the given endpoint (inp),
 * association (stcb) and/or destination (net).  Which arguments are
 * required depends on t_type; if a required argument is missing the
 * call is a silent no-op.  'from' identifies the calling site and is
 * recorded in the timer (stopped_from) for post-mortem debugging.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Every timer type except ADDR_WQ hangs off the endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the sctp_timer structure that backs it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	/*
	 * Several timer types share one sctp_timer (e.g. COOKIE and SEND
	 * both use net->rxt_timer).  If the timer is currently owned by a
	 * different type, it is not the one the caller wants stopped.
	 */
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the association's count of running SEND timers in sync. */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2297 
2298 uint32_t
2299 sctp_calculate_len(struct mbuf *m)
2300 {
2301 	uint32_t tlen = 0;
2302 	struct mbuf *at;
2303 
2304 	at = m;
2305 	while (at) {
2306 		tlen += SCTP_BUF_LEN(at);
2307 		at = SCTP_BUF_NEXT(at);
2308 	}
2309 	return (tlen);
2310 }
2311 
2312 void
2313 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2314     struct sctp_association *asoc, uint32_t mtu)
2315 {
2316 	/*
2317 	 * Reset the P-MTU size on this association, this involves changing
2318 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2319 	 * allow the DF flag to be cleared.
2320 	 */
2321 	struct sctp_tmit_chunk *chk;
2322 	unsigned int eff_mtu, ovh;
2323 
2324 	asoc->smallest_mtu = mtu;
2325 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2326 		ovh = SCTP_MIN_OVERHEAD;
2327 	} else {
2328 		ovh = SCTP_MIN_V4_OVERHEAD;
2329 	}
2330 	eff_mtu = mtu - ovh;
2331 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2332 		if (chk->send_size > eff_mtu) {
2333 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2334 		}
2335 	}
2336 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2337 		if (chk->send_size > eff_mtu) {
2338 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2339 		}
2340 	}
2341 }
2342 
2343 
2344 /*
2345  * given an association and starting time of the current RTT period return
2346  * RTO in number of msecs net should point to the current network
2347  */
2348 
2349 uint32_t
2350 sctp_calculate_rto(struct sctp_tcb *stcb,
2351     struct sctp_association *asoc,
2352     struct sctp_nets *net,
2353     struct timeval *told,
2354     int safe, int rtt_from_sack)
2355 {
2356 	/*-
2357 	 * given an association and the starting time of the current RTT
2358 	 * period (in value1/value2) return RTO in number of msecs.
2359 	 */
2360 	int32_t rtt;		/* RTT in ms */
2361 	uint32_t new_rto;
2362 	int first_measure = 0;
2363 	struct timeval now, then, *old;
2364 
2365 	/* Copy it out for sparc64 */
2366 	if (safe == sctp_align_unsafe_makecopy) {
2367 		old = &then;
2368 		memcpy(&then, told, sizeof(struct timeval));
2369 	} else if (safe == sctp_align_safe_nocopy) {
2370 		old = told;
2371 	} else {
2372 		/* error */
2373 		SCTP_PRINTF("Huh, bad rto calc call\n");
2374 		return (0);
2375 	}
2376 	/************************/
2377 	/* 1. calculate new RTT */
2378 	/************************/
2379 	/* get the current time */
2380 	if (stcb->asoc.use_precise_time) {
2381 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2382 	} else {
2383 		(void)SCTP_GETTIME_TIMEVAL(&now);
2384 	}
2385 	timevalsub(&now, old);
2386 	/* store the current RTT in us */
2387 	net->rtt = (uint64_t) 10000000 *(uint64_t) now.tv_sec +
2388 	         (uint64_t) now.tv_usec;
2389 
2390 	/* computer rtt in ms */
2391 	rtt = net->rtt / 1000;
2392 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2393 		/*
2394 		 * Tell the CC module that a new update has just occurred
2395 		 * from a sack
2396 		 */
2397 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2398 	}
2399 	/*
2400 	 * Do we need to determine the lan? We do this only on sacks i.e.
2401 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2402 	 */
2403 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2404 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2405 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2406 			net->lan_type = SCTP_LAN_INTERNET;
2407 		} else {
2408 			net->lan_type = SCTP_LAN_LOCAL;
2409 		}
2410 	}
2411 	/***************************/
2412 	/* 2. update RTTVAR & SRTT */
2413 	/***************************/
2414 	/*-
2415 	 * Compute the scaled average lastsa and the
2416 	 * scaled variance lastsv as described in van Jacobson
2417 	 * Paper "Congestion Avoidance and Control", Annex A.
2418 	 *
2419 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2420 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2421 	 */
2422 	if (net->RTO_measured) {
2423 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2424 		net->lastsa += rtt;
2425 		if (rtt < 0) {
2426 			rtt = -rtt;
2427 		}
2428 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2429 		net->lastsv += rtt;
2430 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2431 			rto_logging(net, SCTP_LOG_RTTVAR);
2432 		}
2433 	} else {
2434 		/* First RTO measurment */
2435 		net->RTO_measured = 1;
2436 		first_measure = 1;
2437 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2438 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2439 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2440 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2441 		}
2442 	}
2443 	if (net->lastsv == 0) {
2444 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2445 	}
2446 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2447 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2448 	    (stcb->asoc.sat_network_lockout == 0)) {
2449 		stcb->asoc.sat_network = 1;
2450 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2451 		stcb->asoc.sat_network = 0;
2452 		stcb->asoc.sat_network_lockout = 1;
2453 	}
2454 	/* bound it, per C6/C7 in Section 5.3.1 */
2455 	if (new_rto < stcb->asoc.minrto) {
2456 		new_rto = stcb->asoc.minrto;
2457 	}
2458 	if (new_rto > stcb->asoc.maxrto) {
2459 		new_rto = stcb->asoc.maxrto;
2460 	}
2461 	/* we are now returning the RTO */
2462 	return (new_rto);
2463 }
2464 
2465 /*
2466  * return a pointer to a contiguous piece of data from the given mbuf chain
2467  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2468  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2469  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2470  */
2471 caddr_t
2472 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2473 {
2474 	uint32_t count;
2475 	uint8_t *ptr;
2476 
2477 	ptr = in_ptr;
2478 	if ((off < 0) || (len <= 0))
2479 		return (NULL);
2480 
2481 	/* find the desired start location */
2482 	while ((m != NULL) && (off > 0)) {
2483 		if (off < SCTP_BUF_LEN(m))
2484 			break;
2485 		off -= SCTP_BUF_LEN(m);
2486 		m = SCTP_BUF_NEXT(m);
2487 	}
2488 	if (m == NULL)
2489 		return (NULL);
2490 
2491 	/* is the current mbuf large enough (eg. contiguous)? */
2492 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2493 		return (mtod(m, caddr_t)+off);
2494 	} else {
2495 		/* else, it spans more than one mbuf, so save a temp copy... */
2496 		while ((m != NULL) && (len > 0)) {
2497 			count = min(SCTP_BUF_LEN(m) - off, len);
2498 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2499 			len -= count;
2500 			ptr += count;
2501 			off = 0;
2502 			m = SCTP_BUF_NEXT(m);
2503 		}
2504 		if ((m == NULL) && (len > 0))
2505 			return (NULL);
2506 		else
2507 			return ((caddr_t)in_ptr);
2508 	}
2509 }
2510 
2511 
2512 
2513 struct sctp_paramhdr *
2514 sctp_get_next_param(struct mbuf *m,
2515     int offset,
2516     struct sctp_paramhdr *pull,
2517     int pull_limit)
2518 {
2519 	/* This just provides a typed signature to Peter's Pull routine */
2520 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2521 	    (uint8_t *) pull));
2522 }
2523 
2524 
2525 int
2526 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2527 {
2528 	/*
2529 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2530 	 * padlen is > 3 this routine will fail.
2531 	 */
2532 	uint8_t *dp;
2533 	int i;
2534 
2535 	if (padlen > 3) {
2536 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2537 		return (ENOBUFS);
2538 	}
2539 	if (padlen <= M_TRAILINGSPACE(m)) {
2540 		/*
2541 		 * The easy way. We hope the majority of the time we hit
2542 		 * here :)
2543 		 */
2544 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2545 		SCTP_BUF_LEN(m) += padlen;
2546 	} else {
2547 		/* Hard way we must grow the mbuf */
2548 		struct mbuf *tmp;
2549 
2550 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2551 		if (tmp == NULL) {
2552 			/* Out of space GAK! we are in big trouble. */
2553 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2554 			return (ENOBUFS);
2555 		}
2556 		/* setup and insert in middle */
2557 		SCTP_BUF_LEN(tmp) = padlen;
2558 		SCTP_BUF_NEXT(tmp) = NULL;
2559 		SCTP_BUF_NEXT(m) = tmp;
2560 		dp = mtod(tmp, uint8_t *);
2561 	}
2562 	/* zero out the pad */
2563 	for (i = 0; i < padlen; i++) {
2564 		*dp = 0;
2565 		dp++;
2566 	}
2567 	return (0);
2568 }
2569 
2570 int
2571 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2572 {
2573 	/* find the last mbuf in chain and pad it */
2574 	struct mbuf *m_at;
2575 
2576 	m_at = m;
2577 	if (last_mbuf) {
2578 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2579 	} else {
2580 		while (m_at) {
2581 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2582 				return (sctp_add_pad_tombuf(m_at, padval));
2583 			}
2584 			m_at = SCTP_BUF_NEXT(m_at);
2585 		}
2586 	}
2587 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2588 	return (EFAULT);
2589 }
2590 
2591 static void
2592 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2593     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2594 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2595     SCTP_UNUSED
2596 #endif
2597 )
2598 {
2599 	struct mbuf *m_notify;
2600 	struct sctp_assoc_change *sac;
2601 	struct sctp_queued_to_read *control;
2602 	size_t notif_len, abort_len;
2603 	unsigned int i;
2604 
2605 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2606 	struct socket *so;
2607 
2608 #endif
2609 
2610 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2611 		notif_len = sizeof(struct sctp_assoc_change);
2612 		if (abort != NULL) {
2613 			abort_len = htons(abort->ch.chunk_length);
2614 		} else {
2615 			abort_len = 0;
2616 		}
2617 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2618 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2619 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2620 			notif_len += abort_len;
2621 		}
2622 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
2623 		if (m_notify == NULL) {
2624 			/* Retry with smaller value. */
2625 			notif_len = sizeof(struct sctp_assoc_change);
2626 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
2627 			if (m_notify == NULL) {
2628 				goto set_error;
2629 			}
2630 		}
2631 		SCTP_BUF_NEXT(m_notify) = NULL;
2632 		sac = mtod(m_notify, struct sctp_assoc_change *);
2633 		sac->sac_type = SCTP_ASSOC_CHANGE;
2634 		sac->sac_flags = 0;
2635 		sac->sac_length = sizeof(struct sctp_assoc_change);
2636 		sac->sac_state = state;
2637 		sac->sac_error = error;
2638 		/* XXX verify these stream counts */
2639 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2640 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2641 		sac->sac_assoc_id = sctp_get_associd(stcb);
2642 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2643 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2644 				i = 0;
2645 				if (stcb->asoc.peer_supports_prsctp) {
2646 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2647 				}
2648 				if (stcb->asoc.peer_supports_auth) {
2649 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2650 				}
2651 				if (stcb->asoc.peer_supports_asconf) {
2652 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2653 				}
2654 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2655 				if (stcb->asoc.peer_supports_strreset) {
2656 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2657 				}
2658 				sac->sac_length += i;
2659 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2660 				memcpy(sac->sac_info, abort, abort_len);
2661 				sac->sac_length += abort_len;
2662 			}
2663 		}
2664 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2665 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2666 		    0, 0, stcb->asoc.context, 0, 0, 0,
2667 		    m_notify);
2668 		if (control != NULL) {
2669 			control->length = SCTP_BUF_LEN(m_notify);
2670 			/* not that we need this */
2671 			control->tail_mbuf = m_notify;
2672 			control->spec_flags = M_NOTIFICATION;
2673 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2674 			    control,
2675 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2676 			    so_locked);
2677 		} else {
2678 			sctp_m_freem(m_notify);
2679 		}
2680 	}
2681 	/*
2682 	 * For 1-to-1 style sockets, we send up and error when an ABORT
2683 	 * comes in.
2684 	 */
2685 set_error:
2686 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2687 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2688 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2689 		if (from_peer) {
2690 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2691 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2692 				stcb->sctp_socket->so_error = ECONNREFUSED;
2693 			} else {
2694 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2695 				stcb->sctp_socket->so_error = ECONNRESET;
2696 			}
2697 		} else {
2698 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2699 			stcb->sctp_socket->so_error = ECONNABORTED;
2700 		}
2701 	}
2702 	/* Wake ANY sleepers */
2703 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2704 	so = SCTP_INP_SO(stcb->sctp_ep);
2705 	if (!so_locked) {
2706 		atomic_add_int(&stcb->asoc.refcnt, 1);
2707 		SCTP_TCB_UNLOCK(stcb);
2708 		SCTP_SOCKET_LOCK(so, 1);
2709 		SCTP_TCB_LOCK(stcb);
2710 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2711 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2712 			SCTP_SOCKET_UNLOCK(so, 1);
2713 			return;
2714 		}
2715 	}
2716 #endif
2717 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2718 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2719 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2720 		socantrcvmore(stcb->sctp_socket);
2721 	}
2722 	sorwakeup(stcb->sctp_socket);
2723 	sowwakeup(stcb->sctp_socket);
2724 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2725 	if (!so_locked) {
2726 		SCTP_SOCKET_UNLOCK(so, 1);
2727 	}
2728 #endif
2729 }
2730 
2731 static void
2732 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2733     struct sockaddr *sa, uint32_t error)
2734 {
2735 	struct mbuf *m_notify;
2736 	struct sctp_paddr_change *spc;
2737 	struct sctp_queued_to_read *control;
2738 
2739 	if ((stcb == NULL) ||
2740 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2741 		/* event not enabled */
2742 		return;
2743 	}
2744 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2745 	if (m_notify == NULL)
2746 		return;
2747 	SCTP_BUF_LEN(m_notify) = 0;
2748 	spc = mtod(m_notify, struct sctp_paddr_change *);
2749 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2750 	spc->spc_flags = 0;
2751 	spc->spc_length = sizeof(struct sctp_paddr_change);
2752 	switch (sa->sa_family) {
2753 #ifdef INET
2754 	case AF_INET:
2755 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2756 		break;
2757 #endif
2758 #ifdef INET6
2759 	case AF_INET6:
2760 		{
2761 			struct sockaddr_in6 *sin6;
2762 
2763 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2764 
2765 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2766 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2767 				if (sin6->sin6_scope_id == 0) {
2768 					/* recover scope_id for user */
2769 					(void)sa6_recoverscope(sin6);
2770 				} else {
2771 					/* clear embedded scope_id for user */
2772 					in6_clearscope(&sin6->sin6_addr);
2773 				}
2774 			}
2775 			break;
2776 		}
2777 #endif
2778 	default:
2779 		/* TSNH */
2780 		break;
2781 	}
2782 	spc->spc_state = state;
2783 	spc->spc_error = error;
2784 	spc->spc_assoc_id = sctp_get_associd(stcb);
2785 
2786 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2787 	SCTP_BUF_NEXT(m_notify) = NULL;
2788 
2789 	/* append to socket */
2790 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2791 	    0, 0, stcb->asoc.context, 0, 0, 0,
2792 	    m_notify);
2793 	if (control == NULL) {
2794 		/* no memory */
2795 		sctp_m_freem(m_notify);
2796 		return;
2797 	}
2798 	control->length = SCTP_BUF_LEN(m_notify);
2799 	control->spec_flags = M_NOTIFICATION;
2800 	/* not that we need this */
2801 	control->tail_mbuf = m_notify;
2802 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2803 	    control,
2804 	    &stcb->sctp_socket->so_rcv, 1,
2805 	    SCTP_READ_LOCK_NOT_HELD,
2806 	    SCTP_SO_NOT_LOCKED);
2807 }
2808 
2809 
/*
 * Queue an SCTP_SEND_FAILED (or the newer SCTP_SEND_FAILED_EVENT)
 * notification for a chunk that was, or was not, sent before the
 * failure.  The chunk's data mbufs are stolen (chk->data is set to
 * NULL) and appended to the notification after the SCTP data chunk
 * header is trimmed off.  Silently returns if neither failure event
 * is enabled or if the socket buffer has no room.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* The extended event takes precedence when both are enabled. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * Advertised length = header + user payload (chunk size minus
	 * the SCTP data chunk header).  NOTE(review): if chk->send_size
	 * were ever smaller than sizeof(struct sctp_data_chunk) this
	 * subtraction would understate the length, since the trim below
	 * is conditional on send_size being large enough — presumably
	 * callers never pass such a chunk; verify against callers.
	 */
	length += chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2920 
2921 
/*
 * Deliver an SCTP_SEND_FAILED notification for a message that was still
 * queued on a stream send queue (never chunked for transmission) when the
 * association failed.  The notification format depends on which event the
 * application subscribed to: the newer sctp_send_failed_event
 * (RECVNSENDFAILEVNT) or the legacy sctp_send_failed (RECVSENDFAILEVNT).
 * The pending user data (sp->data) is stolen from the queue entry and
 * chained onto the notification so the payload is returned to the user.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* The mbuf only needs to hold the header; sp->data is chained later. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	/* Reported length covers the header plus the unsent user data. */
	length += sp->length;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		ssfe->ssfe_type = SCTP_SEND_FAILED;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			/* Part of the message was already chunked off. */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = sp->strseq;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* No room: drop the whole notification (including sp->data). */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3021 
3022 
3023 
3024 static void
3025 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3026 {
3027 	struct mbuf *m_notify;
3028 	struct sctp_adaptation_event *sai;
3029 	struct sctp_queued_to_read *control;
3030 
3031 	if ((stcb == NULL) ||
3032 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3033 		/* event not enabled */
3034 		return;
3035 	}
3036 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3037 	if (m_notify == NULL)
3038 		/* no space left */
3039 		return;
3040 	SCTP_BUF_LEN(m_notify) = 0;
3041 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3042 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3043 	sai->sai_flags = 0;
3044 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3045 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3046 	sai->sai_assoc_id = sctp_get_associd(stcb);
3047 
3048 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3049 	SCTP_BUF_NEXT(m_notify) = NULL;
3050 
3051 	/* append to socket */
3052 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3053 	    0, 0, stcb->asoc.context, 0, 0, 0,
3054 	    m_notify);
3055 	if (control == NULL) {
3056 		/* no memory */
3057 		sctp_m_freem(m_notify);
3058 		return;
3059 	}
3060 	control->length = SCTP_BUF_LEN(m_notify);
3061 	control->spec_flags = M_NOTIFICATION;
3062 	/* not that we need this */
3063 	control->tail_mbuf = m_notify;
3064 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3065 	    control,
3066 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3067 }
3068 
3069 /* This always must be called with the read-queue LOCKED in the INP */
3070 static void
3071 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3072     uint32_t val, int so_locked
3073 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3074     SCTP_UNUSED
3075 #endif
3076 )
3077 {
3078 	struct mbuf *m_notify;
3079 	struct sctp_pdapi_event *pdapi;
3080 	struct sctp_queued_to_read *control;
3081 	struct sockbuf *sb;
3082 
3083 	if ((stcb == NULL) ||
3084 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3085 		/* event not enabled */
3086 		return;
3087 	}
3088 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3089 		return;
3090 	}
3091 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3092 	if (m_notify == NULL)
3093 		/* no space left */
3094 		return;
3095 	SCTP_BUF_LEN(m_notify) = 0;
3096 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3097 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3098 	pdapi->pdapi_flags = 0;
3099 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3100 	pdapi->pdapi_indication = error;
3101 	pdapi->pdapi_stream = (val >> 16);
3102 	pdapi->pdapi_seq = (val & 0x0000ffff);
3103 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3104 
3105 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3106 	SCTP_BUF_NEXT(m_notify) = NULL;
3107 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3108 	    0, 0, stcb->asoc.context, 0, 0, 0,
3109 	    m_notify);
3110 	if (control == NULL) {
3111 		/* no memory */
3112 		sctp_m_freem(m_notify);
3113 		return;
3114 	}
3115 	control->spec_flags = M_NOTIFICATION;
3116 	control->length = SCTP_BUF_LEN(m_notify);
3117 	/* not that we need this */
3118 	control->tail_mbuf = m_notify;
3119 	control->held_length = 0;
3120 	control->length = 0;
3121 	sb = &stcb->sctp_socket->so_rcv;
3122 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3123 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3124 	}
3125 	sctp_sballoc(stcb, sb, m_notify);
3126 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3127 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3128 	}
3129 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3130 	control->end_added = 1;
3131 	if (stcb->asoc.control_pdapi)
3132 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3133 	else {
3134 		/* we really should not see this case */
3135 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3136 	}
3137 	if (stcb->sctp_ep && stcb->sctp_socket) {
3138 		/* This should always be the case */
3139 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3140 		struct socket *so;
3141 
3142 		so = SCTP_INP_SO(stcb->sctp_ep);
3143 		if (!so_locked) {
3144 			atomic_add_int(&stcb->asoc.refcnt, 1);
3145 			SCTP_TCB_UNLOCK(stcb);
3146 			SCTP_SOCKET_LOCK(so, 1);
3147 			SCTP_TCB_LOCK(stcb);
3148 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3149 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3150 				SCTP_SOCKET_UNLOCK(so, 1);
3151 				return;
3152 			}
3153 		}
3154 #endif
3155 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3156 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3157 		if (!so_locked) {
3158 			SCTP_SOCKET_UNLOCK(so, 1);
3159 		}
3160 #endif
3161 	}
3162 }
3163 
/*
 * Handle notification duties when a SHUTDOWN completes: for one-to-one
 * style (TCP model) and connected one-to-many sockets, mark the socket
 * unable to send and wake any writer; then, if enabled, queue an
 * SCTP_SHUTDOWN_EVENT notification for the reader.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock-order dance: take a refcount so the stcb survives
		 * while its lock is dropped to acquire the socket lock.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* Socket closed while we juggled locks; bail. */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3231 
3232 static void
3233 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3234     int so_locked
3235 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3236     SCTP_UNUSED
3237 #endif
3238 )
3239 {
3240 	struct mbuf *m_notify;
3241 	struct sctp_sender_dry_event *event;
3242 	struct sctp_queued_to_read *control;
3243 
3244 	if ((stcb == NULL) ||
3245 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3246 		/* event not enabled */
3247 		return;
3248 	}
3249 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3250 	if (m_notify == NULL) {
3251 		/* no space left */
3252 		return;
3253 	}
3254 	SCTP_BUF_LEN(m_notify) = 0;
3255 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3256 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3257 	event->sender_dry_flags = 0;
3258 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3259 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3260 
3261 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3262 	SCTP_BUF_NEXT(m_notify) = NULL;
3263 
3264 	/* append to socket */
3265 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3266 	    0, 0, stcb->asoc.context, 0, 0, 0,
3267 	    m_notify);
3268 	if (control == NULL) {
3269 		/* no memory */
3270 		sctp_m_freem(m_notify);
3271 		return;
3272 	}
3273 	control->length = SCTP_BUF_LEN(m_notify);
3274 	control->spec_flags = M_NOTIFICATION;
3275 	/* not that we need this */
3276 	control->tail_mbuf = m_notify;
3277 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3278 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3279 }
3280 
3281 
3282 void
3283 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3284 {
3285 	struct mbuf *m_notify;
3286 	struct sctp_queued_to_read *control;
3287 	struct sctp_stream_change_event *stradd;
3288 	int len;
3289 
3290 	if ((stcb == NULL) ||
3291 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3292 		/* event not enabled */
3293 		return;
3294 	}
3295 	if ((stcb->asoc.peer_req_out) && flag) {
3296 		/* Peer made the request, don't tell the local user */
3297 		stcb->asoc.peer_req_out = 0;
3298 		return;
3299 	}
3300 	stcb->asoc.peer_req_out = 0;
3301 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3302 	if (m_notify == NULL)
3303 		/* no space left */
3304 		return;
3305 	SCTP_BUF_LEN(m_notify) = 0;
3306 	len = sizeof(struct sctp_stream_change_event);
3307 	if (len > M_TRAILINGSPACE(m_notify)) {
3308 		/* never enough room */
3309 		sctp_m_freem(m_notify);
3310 		return;
3311 	}
3312 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3313 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3314 	stradd->strchange_flags = flag;
3315 	stradd->strchange_length = len;
3316 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3317 	stradd->strchange_instrms = numberin;
3318 	stradd->strchange_outstrms = numberout;
3319 	SCTP_BUF_LEN(m_notify) = len;
3320 	SCTP_BUF_NEXT(m_notify) = NULL;
3321 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3322 		/* no space */
3323 		sctp_m_freem(m_notify);
3324 		return;
3325 	}
3326 	/* append to socket */
3327 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3328 	    0, 0, stcb->asoc.context, 0, 0, 0,
3329 	    m_notify);
3330 	if (control == NULL) {
3331 		/* no memory */
3332 		sctp_m_freem(m_notify);
3333 		return;
3334 	}
3335 	control->spec_flags = M_NOTIFICATION;
3336 	control->length = SCTP_BUF_LEN(m_notify);
3337 	/* not that we need this */
3338 	control->tail_mbuf = m_notify;
3339 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3340 	    control,
3341 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3342 }
3343 
3344 void
3345 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3346 {
3347 	struct mbuf *m_notify;
3348 	struct sctp_queued_to_read *control;
3349 	struct sctp_assoc_reset_event *strasoc;
3350 	int len;
3351 
3352 	if ((stcb == NULL) ||
3353 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3354 		/* event not enabled */
3355 		return;
3356 	}
3357 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3358 	if (m_notify == NULL)
3359 		/* no space left */
3360 		return;
3361 	SCTP_BUF_LEN(m_notify) = 0;
3362 	len = sizeof(struct sctp_assoc_reset_event);
3363 	if (len > M_TRAILINGSPACE(m_notify)) {
3364 		/* never enough room */
3365 		sctp_m_freem(m_notify);
3366 		return;
3367 	}
3368 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3369 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3370 	strasoc->assocreset_flags = flag;
3371 	strasoc->assocreset_length = len;
3372 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3373 	strasoc->assocreset_local_tsn = sending_tsn;
3374 	strasoc->assocreset_remote_tsn = recv_tsn;
3375 	SCTP_BUF_LEN(m_notify) = len;
3376 	SCTP_BUF_NEXT(m_notify) = NULL;
3377 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3378 		/* no space */
3379 		sctp_m_freem(m_notify);
3380 		return;
3381 	}
3382 	/* append to socket */
3383 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3384 	    0, 0, stcb->asoc.context, 0, 0, 0,
3385 	    m_notify);
3386 	if (control == NULL) {
3387 		/* no memory */
3388 		sctp_m_freem(m_notify);
3389 		return;
3390 	}
3391 	control->spec_flags = M_NOTIFICATION;
3392 	control->length = SCTP_BUF_LEN(m_notify);
3393 	/* not that we need this */
3394 	control->tail_mbuf = m_notify;
3395 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3396 	    control,
3397 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3398 }
3399 
3400 
3401 
/*
 * Queue an SCTP_STREAM_RESET_EVENT notification listing the streams that
 * were reset (or all streams when number_entries is 0), if the application
 * has enabled stream reset events.  'flag' carries the direction/result
 * bits (SCTP_STREAM_RESET_{OUTGOING_SSN,INCOMING,FAILED,DENIED}).
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	/* Header plus one uint16_t per listed stream. */
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	strreset->strreset_flags = flag;
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		for (i = 0; i < number_entries; i++) {
			/*
			 * NOTE(review): entries appear to arrive in network
			 * byte order (taken from the wire request) and are
			 * converted here — confirm against the callers.
			 */
			strreset->strreset_stream_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3463 
3464 
3465 static void
3466 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3467 {
3468 	struct mbuf *m_notify;
3469 	struct sctp_remote_error *sre;
3470 	struct sctp_queued_to_read *control;
3471 	size_t notif_len, chunk_len;
3472 
3473 	if ((stcb == NULL) ||
3474 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3475 		return;
3476 	}
3477 	if (chunk != NULL) {
3478 		chunk_len = htons(chunk->ch.chunk_length);
3479 	} else {
3480 		chunk_len = 0;
3481 	}
3482 	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
3483 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
3484 	if (m_notify == NULL) {
3485 		/* Retry with smaller value. */
3486 		notif_len = sizeof(struct sctp_remote_error);
3487 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
3488 		if (m_notify == NULL) {
3489 			return;
3490 		}
3491 	}
3492 	SCTP_BUF_NEXT(m_notify) = NULL;
3493 	sre = mtod(m_notify, struct sctp_remote_error *);
3494 	sre->sre_type = SCTP_REMOTE_ERROR;
3495 	sre->sre_flags = 0;
3496 	sre->sre_length = sizeof(struct sctp_remote_error);
3497 	sre->sre_error = error;
3498 	sre->sre_assoc_id = sctp_get_associd(stcb);
3499 	if (notif_len > sizeof(struct sctp_remote_error)) {
3500 		memcpy(sre->sre_data, chunk, chunk_len);
3501 		sre->sre_length += chunk_len;
3502 	}
3503 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3504 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3505 	    0, 0, stcb->asoc.context, 0, 0, 0,
3506 	    m_notify);
3507 	if (control != NULL) {
3508 		control->length = SCTP_BUF_LEN(m_notify);
3509 		/* not that we need this */
3510 		control->tail_mbuf = m_notify;
3511 		control->spec_flags = M_NOTIFICATION;
3512 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3513 		    control,
3514 		    &stcb->sctp_socket->so_rcv, 1,
3515 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3516 	} else {
3517 		sctp_m_freem(m_notify);
3518 	}
3519 }
3520 
3521 
/*
 * Central dispatcher for upper-layer (application) notifications.  Maps a
 * SCTP_NOTIFY_* code plus an opaque 'data' argument (whose type depends on
 * the notification: sctp_nets, sctp_tmit_chunk, sctp_stream_queue_pending,
 * uint16_t list, uint32_t value, etc.) onto the specific notification
 * helper.  Suppresses all notifications when the socket is gone or closed,
 * and suppresses interface up/down/confirmed events while the association
 * is still being set up.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* The reader side is shut down; nothing can be delivered. */
		return;
	}
	/* NOTE(review): stcb is already known non-NULL here; the extra
	 * 'stcb &&' checks below (and in the ABORTED cases) are redundant. */
	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is sent only once per association. */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* Message still on a stream queue, never chunked. */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			/* val packs stream (high 16 bits) and seq (low 16). */
			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* In setup states an abort is reported as CANT_STR_ASSOC. */
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		/* For the reset cases 'error' carries the entry count. */
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* 'data' smuggles the key number as an integer. */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}
3701 
/*
 * Drain every outbound queue of the association (sent queue, send queue,
 * and each stream's pending-message queue), issuing a send-failed
 * notification for every chunk/message that still carried data, and
 * freeing the chunks and queue entries.  Used when the association is
 * being torn down.  Takes the TCB send lock unless the caller already
 * holds it (holds_lock non-zero).
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* Nobody left to notify. */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/*
			 * The notification may steal chk->data (chaining it
			 * onto the notification mbuf); re-check before
			 * freeing.
			 */
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* Notification may steal sp->data too. */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3795 
/*
 * Notify the application that the association has been aborted: flush all
 * outbound queues (issuing send-failed notifications) and then deliver the
 * appropriate association-change event.  'from_peer' selects whether the
 * abort is reported as remotely (REM) or locally (LOC) generated; 'abort'
 * optionally carries the ABORT chunk for inclusion in the notification.
 */
void
sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
    struct sctp_abort_chunk *abort, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if (stcb == NULL) {
		return;
	}
	/* For connected one-to-one style sockets, remember the abort. */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* Nobody left to notify. */
		return;
	}
	/* Tell them we lost the asoc */
	sctp_report_all_outbound(stcb, error, 1, so_locked);
	if (from_peer) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
	} else {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
	}
}
3825 
/*
 * Abort an association: notify the application (when a TCB exists), send
 * an ABORT chunk to the peer built from the incoming packet (m/iphlen/sh),
 * and free the TCB.  'op_err' optionally carries an operational error
 * cause to include in the ABORT.  vrf_id/port identify the routing
 * instance and UDP encapsulation port for the outgoing packet.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: hold a refcount so the stcb survives
		 * while its lock is dropped to take the socket lock.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* Established associations also decrement the gauge. */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3869 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the association's inbound and outbound TSN history
 * rings (in_tsnlog/out_tsnlog).  Each ring is printed oldest-first: when
 * the ring has wrapped, the entries from the current index to the end are
 * printed before the entries from the start up to the current index.
 *
 * NOTE(review): the inner guard is spelled "NOSIY_PRINTS" -- presumably a
 * typo for "NOISY_PRINTS".  As written, the body compiles out unless that
 * exact (misspelled) macro is defined; confirm before renaming.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		/* ring empty and never wrapped: nothing received yet */
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* wrapped: the tail [tsn_in_at, end) holds the oldest entries */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* then [0, tsn_in_at) holds the newest entries */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3933 
/*
 * Abort an existing association: mark it WAS_ABORTED, notify the ULP
 * (unless the socket is already gone), send an ABORT chunk to the peer,
 * update statistics, and free the TCB.  With a NULL stcb there is
 * nothing to abort; the inp is freed if its socket is gone and it has
 * no associations left.  so_locked tells the Apple/lock-testing builds
 * whether the caller already holds the socket lock.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* socket gone and no assocs left: reap the inp */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* An established assoc is going away: adjust the gauge. */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Acquire the socket lock before freeing; drop the TCB lock first
	 * for ordering, with a refcount held so the TCB stays alive.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3994 
3995 void
3996 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3997     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3998 {
3999 	struct sctp_chunkhdr *ch, chunk_buf;
4000 	unsigned int chk_length;
4001 	int contains_init_chunk;
4002 
4003 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4004 	/* Generate a TO address for future reference */
4005 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4006 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
4007 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4008 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4009 		}
4010 	}
4011 	contains_init_chunk = 0;
4012 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4013 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4014 	while (ch != NULL) {
4015 		chk_length = ntohs(ch->chunk_length);
4016 		if (chk_length < sizeof(*ch)) {
4017 			/* break to abort land */
4018 			break;
4019 		}
4020 		switch (ch->chunk_type) {
4021 		case SCTP_INIT:
4022 			contains_init_chunk = 1;
4023 			break;
4024 		case SCTP_COOKIE_ECHO:
4025 			/* We hit here only if the assoc is being freed */
4026 			return;
4027 		case SCTP_PACKET_DROPPED:
4028 			/* we don't respond to pkt-dropped */
4029 			return;
4030 		case SCTP_ABORT_ASSOCIATION:
4031 			/* we don't respond with an ABORT to an ABORT */
4032 			return;
4033 		case SCTP_SHUTDOWN_COMPLETE:
4034 			/*
4035 			 * we ignore it since we are not waiting for it and
4036 			 * peer is gone
4037 			 */
4038 			return;
4039 		case SCTP_SHUTDOWN_ACK:
4040 			sctp_send_shutdown_complete2(m, sh, vrf_id, port);
4041 			return;
4042 		default:
4043 			break;
4044 		}
4045 		offset += SCTP_SIZE32(chk_length);
4046 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4047 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4048 	}
4049 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4050 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4051 	    (contains_init_chunk == 0))) {
4052 		sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
4053 	}
4054 }
4055 
4056 /*
4057  * check the inbound datagram to make sure there is not an abort inside it,
4058  * if there is return 1, else return 0.
4059  */
4060 int
4061 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4062 {
4063 	struct sctp_chunkhdr *ch;
4064 	struct sctp_init_chunk *init_chk, chunk_buf;
4065 	int offset;
4066 	unsigned int chk_length;
4067 
4068 	offset = iphlen + sizeof(struct sctphdr);
4069 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4070 	    (uint8_t *) & chunk_buf);
4071 	while (ch != NULL) {
4072 		chk_length = ntohs(ch->chunk_length);
4073 		if (chk_length < sizeof(*ch)) {
4074 			/* packet is probably corrupt */
4075 			break;
4076 		}
4077 		/* we seem to be ok, is it an abort? */
4078 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4079 			/* yep, tell them */
4080 			return (1);
4081 		}
4082 		if (ch->chunk_type == SCTP_INITIATION) {
4083 			/* need to update the Vtag */
4084 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4085 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4086 			if (init_chk != NULL) {
4087 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4088 			}
4089 		}
4090 		/* Nope, move to the next chunk */
4091 		offset += SCTP_SIZE32(chk_length);
4092 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4093 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4094 	}
4095 	return (0);
4096 }
4097 
4098 /*
4099  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4100  * set (i.e. it's 0) so, create this function to compare link local scopes
4101  */
4102 #ifdef INET6
4103 uint32_t
4104 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4105 {
4106 	struct sockaddr_in6 a, b;
4107 
4108 	/* save copies */
4109 	a = *addr1;
4110 	b = *addr2;
4111 
4112 	if (a.sin6_scope_id == 0)
4113 		if (sa6_recoverscope(&a)) {
4114 			/* can't get scope, so can't match */
4115 			return (0);
4116 		}
4117 	if (b.sin6_scope_id == 0)
4118 		if (sa6_recoverscope(&b)) {
4119 			/* can't get scope, so can't match */
4120 			return (0);
4121 		}
4122 	if (a.sin6_scope_id != b.sin6_scope_id)
4123 		return (0);
4124 
4125 	return (1);
4126 }
4127 
4128 /*
4129  * returns a sockaddr_in6 with embedded scope recovered and removed
4130  */
4131 struct sockaddr_in6 *
4132 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4133 {
4134 	/* check and strip embedded scope junk */
4135 	if (addr->sin6_family == AF_INET6) {
4136 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4137 			if (addr->sin6_scope_id == 0) {
4138 				*store = *addr;
4139 				if (!sa6_recoverscope(store)) {
4140 					/* use the recovered scope */
4141 					addr = store;
4142 				}
4143 			} else {
4144 				/* else, return the original "to" addr */
4145 				in6_clearscope(&addr->sin6_addr);
4146 			}
4147 		}
4148 	}
4149 	return (addr);
4150 }
4151 
4152 #endif
4153 
4154 /*
4155  * are the two addresses the same?  currently a "scopeless" check returns: 1
4156  * if same, 0 if not
4157  */
4158 int
4159 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4160 {
4161 
4162 	/* must be valid */
4163 	if (sa1 == NULL || sa2 == NULL)
4164 		return (0);
4165 
4166 	/* must be the same family */
4167 	if (sa1->sa_family != sa2->sa_family)
4168 		return (0);
4169 
4170 	switch (sa1->sa_family) {
4171 #ifdef INET6
4172 	case AF_INET6:
4173 		{
4174 			/* IPv6 addresses */
4175 			struct sockaddr_in6 *sin6_1, *sin6_2;
4176 
4177 			sin6_1 = (struct sockaddr_in6 *)sa1;
4178 			sin6_2 = (struct sockaddr_in6 *)sa2;
4179 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4180 			    sin6_2));
4181 		}
4182 #endif
4183 #ifdef INET
4184 	case AF_INET:
4185 		{
4186 			/* IPv4 addresses */
4187 			struct sockaddr_in *sin_1, *sin_2;
4188 
4189 			sin_1 = (struct sockaddr_in *)sa1;
4190 			sin_2 = (struct sockaddr_in *)sa2;
4191 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4192 		}
4193 #endif
4194 	default:
4195 		/* we don't do these... */
4196 		return (0);
4197 	}
4198 }
4199 
4200 void
4201 sctp_print_address(struct sockaddr *sa)
4202 {
4203 #ifdef INET6
4204 	char ip6buf[INET6_ADDRSTRLEN];
4205 
4206 	ip6buf[0] = 0;
4207 #endif
4208 
4209 	switch (sa->sa_family) {
4210 #ifdef INET6
4211 	case AF_INET6:
4212 		{
4213 			struct sockaddr_in6 *sin6;
4214 
4215 			sin6 = (struct sockaddr_in6 *)sa;
4216 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4217 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4218 			    ntohs(sin6->sin6_port),
4219 			    sin6->sin6_scope_id);
4220 			break;
4221 		}
4222 #endif
4223 #ifdef INET
4224 	case AF_INET:
4225 		{
4226 			struct sockaddr_in *sin;
4227 			unsigned char *p;
4228 
4229 			sin = (struct sockaddr_in *)sa;
4230 			p = (unsigned char *)&sin->sin_addr;
4231 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4232 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4233 			break;
4234 		}
4235 #endif
4236 	default:
4237 		SCTP_PRINTF("?\n");
4238 		break;
4239 	}
4240 }
4241 
4242 void
4243 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4244 {
4245 	switch (iph->ip_v) {
4246 #ifdef INET
4247 	case IPVERSION:
4248 		{
4249 			struct sockaddr_in lsa, fsa;
4250 
4251 			bzero(&lsa, sizeof(lsa));
4252 			lsa.sin_len = sizeof(lsa);
4253 			lsa.sin_family = AF_INET;
4254 			lsa.sin_addr = iph->ip_src;
4255 			lsa.sin_port = sh->src_port;
4256 			bzero(&fsa, sizeof(fsa));
4257 			fsa.sin_len = sizeof(fsa);
4258 			fsa.sin_family = AF_INET;
4259 			fsa.sin_addr = iph->ip_dst;
4260 			fsa.sin_port = sh->dest_port;
4261 			SCTP_PRINTF("src: ");
4262 			sctp_print_address((struct sockaddr *)&lsa);
4263 			SCTP_PRINTF("dest: ");
4264 			sctp_print_address((struct sockaddr *)&fsa);
4265 			break;
4266 		}
4267 #endif
4268 #ifdef INET6
4269 	case IPV6_VERSION >> 4:
4270 		{
4271 			struct ip6_hdr *ip6;
4272 			struct sockaddr_in6 lsa6, fsa6;
4273 
4274 			ip6 = (struct ip6_hdr *)iph;
4275 			bzero(&lsa6, sizeof(lsa6));
4276 			lsa6.sin6_len = sizeof(lsa6);
4277 			lsa6.sin6_family = AF_INET6;
4278 			lsa6.sin6_addr = ip6->ip6_src;
4279 			lsa6.sin6_port = sh->src_port;
4280 			bzero(&fsa6, sizeof(fsa6));
4281 			fsa6.sin6_len = sizeof(fsa6);
4282 			fsa6.sin6_family = AF_INET6;
4283 			fsa6.sin6_addr = ip6->ip6_dst;
4284 			fsa6.sin6_port = sh->dest_port;
4285 			SCTP_PRINTF("src: ");
4286 			sctp_print_address((struct sockaddr *)&lsa6);
4287 			SCTP_PRINTF("dest: ");
4288 			sctp_print_address((struct sockaddr *)&fsa6);
4289 			break;
4290 		}
4291 #endif
4292 	default:
4293 		/* TSNH */
4294 		break;
4295 	}
4296 }
4297 
/*
 * Migrate all read-queue control structures belonging to stcb from
 * old_inp to new_inp (used on peeloff/accept).  For every mbuf moved,
 * the socket-buffer accounting is debited on the old socket and
 * credited on the new one so sb_cc stays consistent on both sides.
 * Silently gives up (leaving data stranded) if the old socket's
 * receive buffer cannot be sb-locked.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;	/* staging queue between the two inps */
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit the old socket buffer for every mbuf moved */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit the new socket buffer for every mbuf moved */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4373 
/*
 * Append a completed (or, with end == 0, still-growing) control to the
 * inp's read queue, pruning zero-length mbufs from its chain and
 * charging the socket buffer for every byte so select()/read() see the
 * data.  If the socket can no longer be read from, the control and its
 * data are freed instead.  Finally the reader is woken (or the
 * zero-copy event fired).  inp_read_lock_held/so_locked tell us which
 * locks the caller already holds.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* no reader will ever come: discard the control and data */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications are not counted as received messages */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* walk the chain: drop empty mbufs, account for the rest */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* charge the socket buffer for this mbuf */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		/* last piece: mark message complete for the reader */
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* wake up whoever is waiting to read */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * take the socket lock, dropping the TCB
				 * lock first (refcount keeps the TCB alive)
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* socket vanished while unlocked */
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4499 
4500 
/*
 * Append mbuf chain m to an existing read-queue control (partial
 * delivery API, or reassembly-queue append).  Zero-length mbufs are
 * pruned; when sb is non-NULL the socket buffer is charged for each
 * appended mbuf.  With end set the message is marked complete and any
 * PD-API linkage on the stcb is cleared.  Returns 0 on success, -1 on
 * failure (no control, empty chain, or control already complete).
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* nobody can read anymore; quietly succeed */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* prune empty mbufs and account for the rest */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			/* charge the socket buffer for this mbuf */
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			/* PD-API delivery finished for this assoc */
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake up whoever is waiting to read */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * take the socket lock, dropping the TCB lock
			 * first (refcount keeps the TCB alive)
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4650 
4651 
4652 
4653 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4654  *************ALTERNATE ROUTING CODE
4655  */
4656 
4657 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4658  *************ALTERNATE ROUTING CODE
4659  */
4660 
4661 struct mbuf *
4662 sctp_generate_invmanparam(int err)
4663 {
4664 	/* Return a MBUF with a invalid mandatory parameter */
4665 	struct mbuf *m;
4666 
4667 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4668 	if (m) {
4669 		struct sctp_paramhdr *ph;
4670 
4671 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4672 		ph = mtod(m, struct sctp_paramhdr *);
4673 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4674 		ph->param_type = htons(err);
4675 	}
4676 	return (m);
4677 }
4678 
#ifdef SCTP_MBCNT_LOGGING
/*
 * Logging variant of the outbound buffer-space accounting: release the
 * queue space booked to chunk tp1 (chk_cnt chunks' worth) from the
 * association totals and, for TCP-model sockets, from so_snd.sb_cc.
 * Both subtractions clamp at zero rather than underflowing.  No-op when
 * the chunk carries no data.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero to avoid wrap-around on under-accounting */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* TCP-model sockets also carry the bytes in so_snd */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}

#endif
4713 
4714 int
4715 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4716     uint8_t sent, int so_locked
4717 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4718     SCTP_UNUSED
4719 #endif
4720 )
4721 {
4722 	struct sctp_stream_out *strq;
4723 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4724 	struct sctp_stream_queue_pending *sp;
4725 	uint16_t stream = 0, seq = 0;
4726 	uint8_t foundeom = 0;
4727 	int ret_sz = 0;
4728 	int notdone;
4729 	int do_wakeup_routine = 0;
4730 
4731 	stream = tp1->rec.data.stream_number;
4732 	seq = tp1->rec.data.stream_seq;
4733 	do {
4734 		ret_sz += tp1->book_size;
4735 		if (tp1->data != NULL) {
4736 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4737 				sctp_flight_size_decrease(tp1);
4738 				sctp_total_flight_decrease(stcb, tp1);
4739 			}
4740 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4741 			stcb->asoc.peers_rwnd += tp1->send_size;
4742 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4743 			if (sent) {
4744 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4745 			} else {
4746 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4747 			}
4748 			if (tp1->data) {
4749 				sctp_m_freem(tp1->data);
4750 				tp1->data = NULL;
4751 			}
4752 			do_wakeup_routine = 1;
4753 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4754 				stcb->asoc.sent_queue_cnt_removeable--;
4755 			}
4756 		}
4757 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4758 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4759 		    SCTP_DATA_NOT_FRAG) {
4760 			/* not frag'ed we ae done   */
4761 			notdone = 0;
4762 			foundeom = 1;
4763 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4764 			/* end of frag, we are done */
4765 			notdone = 0;
4766 			foundeom = 1;
4767 		} else {
4768 			/*
4769 			 * Its a begin or middle piece, we must mark all of
4770 			 * it
4771 			 */
4772 			notdone = 1;
4773 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4774 		}
4775 	} while (tp1 && notdone);
4776 	if (foundeom == 0) {
4777 		/*
4778 		 * The multi-part message was scattered across the send and
4779 		 * sent queue.
4780 		 */
4781 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4782 			if ((tp1->rec.data.stream_number != stream) ||
4783 			    (tp1->rec.data.stream_seq != seq)) {
4784 				break;
4785 			}
4786 			/*
4787 			 * save to chk in case we have some on stream out
4788 			 * queue. If so and we have an un-transmitted one we
4789 			 * don't have to fudge the TSN.
4790 			 */
4791 			chk = tp1;
4792 			ret_sz += tp1->book_size;
4793 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4794 			if (sent) {
4795 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4796 			} else {
4797 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4798 			}
4799 			if (tp1->data) {
4800 				sctp_m_freem(tp1->data);
4801 				tp1->data = NULL;
4802 			}
4803 			/* No flight involved here book the size to 0 */
4804 			tp1->book_size = 0;
4805 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4806 				foundeom = 1;
4807 			}
4808 			do_wakeup_routine = 1;
4809 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4810 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4811 			/*
4812 			 * on to the sent queue so we can wait for it to be
4813 			 * passed by.
4814 			 */
4815 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4816 			    sctp_next);
4817 			stcb->asoc.send_queue_cnt--;
4818 			stcb->asoc.sent_queue_cnt++;
4819 		}
4820 	}
4821 	if (foundeom == 0) {
4822 		/*
4823 		 * Still no eom found. That means there is stuff left on the
4824 		 * stream out queue.. yuck.
4825 		 */
4826 		strq = &stcb->asoc.strmout[stream];
4827 		SCTP_TCB_SEND_LOCK(stcb);
4828 		TAILQ_FOREACH(sp, &strq->outqueue, next) {
4829 			/* FIXME: Shouldn't this be a serial number check? */
4830 			if (sp->strseq > seq) {
4831 				break;
4832 			}
4833 			/* Check if its our SEQ */
4834 			if (sp->strseq == seq) {
4835 				sp->discard_rest = 1;
4836 				/*
4837 				 * We may need to put a chunk on the queue
4838 				 * that holds the TSN that would have been
4839 				 * sent with the LAST bit.
4840 				 */
4841 				if (chk == NULL) {
4842 					/* Yep, we have to */
4843 					sctp_alloc_a_chunk(stcb, chk);
4844 					if (chk == NULL) {
4845 						/*
4846 						 * we are hosed. All we can
4847 						 * do is nothing.. which
4848 						 * will cause an abort if
4849 						 * the peer is paying
4850 						 * attention.
4851 						 */
4852 						goto oh_well;
4853 					}
4854 					memset(chk, 0, sizeof(*chk));
4855 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4856 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4857 					chk->asoc = &stcb->asoc;
4858 					chk->rec.data.stream_seq = sp->strseq;
4859 					chk->rec.data.stream_number = sp->stream;
4860 					chk->rec.data.payloadtype = sp->ppid;
4861 					chk->rec.data.context = sp->context;
4862 					chk->flags = sp->act_flags;
4863 					if (sp->net)
4864 						chk->whoTo = sp->net;
4865 					else
4866 						chk->whoTo = stcb->asoc.primary_destination;
4867 					atomic_add_int(&chk->whoTo->ref_count, 1);
4868 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4869 					stcb->asoc.pr_sctp_cnt++;
4870 					chk->pr_sctp_on = 1;
4871 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4872 					stcb->asoc.sent_queue_cnt++;
4873 					stcb->asoc.pr_sctp_cnt++;
4874 				} else {
4875 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4876 				}
4877 		oh_well:
4878 				if (sp->data) {
4879 					/*
4880 					 * Pull any data to free up the SB
4881 					 * and allow sender to "add more"
4882 					 * whilc we will throw away :-)
4883 					 */
4884 					sctp_free_spbufspace(stcb, &stcb->asoc,
4885 					    sp);
4886 					ret_sz += sp->length;
4887 					do_wakeup_routine = 1;
4888 					sp->some_taken = 1;
4889 					sctp_m_freem(sp->data);
4890 					sp->length = 0;
4891 					sp->data = NULL;
4892 					sp->tail_mbuf = NULL;
4893 				}
4894 				break;
4895 			}
4896 		}		/* End tailq_foreach */
4897 		SCTP_TCB_SEND_UNLOCK(stcb);
4898 	}
4899 	if (do_wakeup_routine) {
4900 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4901 		struct socket *so;
4902 
4903 		so = SCTP_INP_SO(stcb->sctp_ep);
4904 		if (!so_locked) {
4905 			atomic_add_int(&stcb->asoc.refcnt, 1);
4906 			SCTP_TCB_UNLOCK(stcb);
4907 			SCTP_SOCKET_LOCK(so, 1);
4908 			SCTP_TCB_LOCK(stcb);
4909 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4910 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4911 				/* assoc was freed while we were unlocked */
4912 				SCTP_SOCKET_UNLOCK(so, 1);
4913 				return (ret_sz);
4914 			}
4915 		}
4916 #endif
4917 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4918 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4919 		if (!so_locked) {
4920 			SCTP_SOCKET_UNLOCK(so, 1);
4921 		}
4922 #endif
4923 	}
4924 	return (ret_sz);
4925 }
4926 
4927 /*
4928  * checks to see if the given address, sa, is one that is currently known by
4929  * the kernel note: can't distinguish the same address on multiple interfaces
4930  * and doesn't handle multiple addresses with different zone/scope id's note:
4931  * ifa_ifwithaddr() compares the entire sockaddr struct
4932  */
4933 struct sctp_ifa *
4934 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4935     int holds_lock)
4936 {
4937 	struct sctp_laddr *laddr;
4938 
4939 	if (holds_lock == 0) {
4940 		SCTP_INP_RLOCK(inp);
4941 	}
4942 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4943 		if (laddr->ifa == NULL)
4944 			continue;
4945 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4946 			continue;
4947 #ifdef INET
4948 		if (addr->sa_family == AF_INET) {
4949 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4950 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4951 				/* found him. */
4952 				if (holds_lock == 0) {
4953 					SCTP_INP_RUNLOCK(inp);
4954 				}
4955 				return (laddr->ifa);
4956 				break;
4957 			}
4958 		}
4959 #endif
4960 #ifdef INET6
4961 		if (addr->sa_family == AF_INET6) {
4962 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4963 			    &laddr->ifa->address.sin6)) {
4964 				/* found him. */
4965 				if (holds_lock == 0) {
4966 					SCTP_INP_RUNLOCK(inp);
4967 				}
4968 				return (laddr->ifa);
4969 				break;
4970 			}
4971 		}
4972 #endif
4973 	}
4974 	if (holds_lock == 0) {
4975 		SCTP_INP_RUNLOCK(inp);
4976 	}
4977 	return (NULL);
4978 }
4979 
/*
 * Compute the hash value used to index an address into the VRF address
 * hash table.  Returns 0 for address families we do not hash.
 *
 * NOTE: the IPv6 case previously read "case INET6:"; INET6 is the
 * kernel build-option macro (expands to 1), not the address family
 * constant, so IPv6 addresses fell through to default and all hashed
 * to 0.  It must be AF_INET6.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			/* Fold the upper 16 bits into the lower 16. */
			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t hash_of_addr;

			sin6 = (struct sockaddr_in6 *)addr;
			/* Sum the four 32-bit words, then fold the top half in. */
			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3]);
			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
			return (hash_of_addr);
		}
#endif
	default:
		break;
	}
	return (0);
}
5013 
5014 struct sctp_ifa *
5015 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
5016 {
5017 	struct sctp_ifa *sctp_ifap;
5018 	struct sctp_vrf *vrf;
5019 	struct sctp_ifalist *hash_head;
5020 	uint32_t hash_of_addr;
5021 
5022 	if (holds_lock == 0)
5023 		SCTP_IPI_ADDR_RLOCK();
5024 
5025 	vrf = sctp_find_vrf(vrf_id);
5026 	if (vrf == NULL) {
5027 stage_right:
5028 		if (holds_lock == 0)
5029 			SCTP_IPI_ADDR_RUNLOCK();
5030 		return (NULL);
5031 	}
5032 	hash_of_addr = sctp_get_ifa_hash_val(addr);
5033 
5034 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
5035 	if (hash_head == NULL) {
5036 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
5037 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
5038 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
5039 		sctp_print_address(addr);
5040 		SCTP_PRINTF("No such bucket for address\n");
5041 		if (holds_lock == 0)
5042 			SCTP_IPI_ADDR_RUNLOCK();
5043 
5044 		return (NULL);
5045 	}
5046 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
5047 		if (sctp_ifap == NULL) {
5048 #ifdef INVARIANTS
5049 			panic("Huh LIST_FOREACH corrupt");
5050 			goto stage_right;
5051 #else
5052 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
5053 			goto stage_right;
5054 #endif
5055 		}
5056 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5057 			continue;
5058 #ifdef INET
5059 		if (addr->sa_family == AF_INET) {
5060 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5061 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5062 				/* found him. */
5063 				if (holds_lock == 0)
5064 					SCTP_IPI_ADDR_RUNLOCK();
5065 				return (sctp_ifap);
5066 				break;
5067 			}
5068 		}
5069 #endif
5070 #ifdef INET6
5071 		if (addr->sa_family == AF_INET6) {
5072 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5073 			    &sctp_ifap->address.sin6)) {
5074 				/* found him. */
5075 				if (holds_lock == 0)
5076 					SCTP_IPI_ADDR_RUNLOCK();
5077 				return (sctp_ifap);
5078 				break;
5079 			}
5080 		}
5081 #endif
5082 	}
5083 	if (holds_lock == 0)
5084 		SCTP_IPI_ADDR_RUNLOCK();
5085 	return (NULL);
5086 }
5087 
/*
 * Called after the application has pulled data off the receive socket
 * buffer.  Decides whether the receive window has opened up enough
 * (at least rwnd_req bytes beyond the last value reported to the
 * peer) to justify sending a window-update SACK right away; if not,
 * the freed byte count is just remembered for next time.
 *
 * 'freed_so_far' is consumed: its value is folded into the per-tcb
 * counter and it is reset to zero.  'hold_rlock' indicates the caller
 * holds the inp read-queue lock; it is dropped before taking the TCB
 * lock (lock ordering) and re-taken before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed while we work. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed byte count into the per-tcb counter. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have we freed enough to make a look worthwhile? */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window opened enough: report it to the peer now. */
		if (hold_rlock) {
			/* Drop the read-queue lock before the TCB lock. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: the state may have changed. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-take the read-queue lock if we dropped it for the caller. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Release the association reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5169 
5170 int
5171 sctp_sorecvmsg(struct socket *so,
5172     struct uio *uio,
5173     struct mbuf **mp,
5174     struct sockaddr *from,
5175     int fromlen,
5176     int *msg_flags,
5177     struct sctp_sndrcvinfo *sinfo,
5178     int filling_sinfo)
5179 {
5180 	/*
5181 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5182 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5183 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5184 	 * On the way out we may send out any combination of:
5185 	 * MSG_NOTIFICATION MSG_EOR
5186 	 *
5187 	 */
5188 	struct sctp_inpcb *inp = NULL;
5189 	int my_len = 0;
5190 	int cp_len = 0, error = 0;
5191 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5192 	struct mbuf *m = NULL;
5193 	struct sctp_tcb *stcb = NULL;
5194 	int wakeup_read_socket = 0;
5195 	int freecnt_applied = 0;
5196 	int out_flags = 0, in_flags = 0;
5197 	int block_allowed = 1;
5198 	uint32_t freed_so_far = 0;
5199 	uint32_t copied_so_far = 0;
5200 	int in_eeor_mode = 0;
5201 	int no_rcv_needed = 0;
5202 	uint32_t rwnd_req = 0;
5203 	int hold_sblock = 0;
5204 	int hold_rlock = 0;
5205 	int slen = 0;
5206 	uint32_t held_length = 0;
5207 	int sockbuf_lock = 0;
5208 
5209 	if (uio == NULL) {
5210 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5211 		return (EINVAL);
5212 	}
5213 	if (msg_flags) {
5214 		in_flags = *msg_flags;
5215 		if (in_flags & MSG_PEEK)
5216 			SCTP_STAT_INCR(sctps_read_peeks);
5217 	} else {
5218 		in_flags = 0;
5219 	}
5220 	slen = uio->uio_resid;
5221 
5222 	/* Pull in and set up our int flags */
5223 	if (in_flags & MSG_OOB) {
5224 		/* Out of band's NOT supported */
5225 		return (EOPNOTSUPP);
5226 	}
5227 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5228 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5229 		return (EINVAL);
5230 	}
5231 	if ((in_flags & (MSG_DONTWAIT
5232 	    | MSG_NBIO
5233 	    )) ||
5234 	    SCTP_SO_IS_NBIO(so)) {
5235 		block_allowed = 0;
5236 	}
5237 	/* setup the endpoint */
5238 	inp = (struct sctp_inpcb *)so->so_pcb;
5239 	if (inp == NULL) {
5240 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5241 		return (EFAULT);
5242 	}
5243 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5244 	/* Must be at least a MTU's worth */
5245 	if (rwnd_req < SCTP_MIN_RWND)
5246 		rwnd_req = SCTP_MIN_RWND;
5247 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5248 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5249 		sctp_misc_ints(SCTP_SORECV_ENTER,
5250 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5251 	}
5252 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5253 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5254 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5255 	}
5256 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5257 	sockbuf_lock = 1;
5258 	if (error) {
5259 		goto release_unlocked;
5260 	}
5261 restart:
5262 
5263 
5264 restart_nosblocks:
5265 	if (hold_sblock == 0) {
5266 		SOCKBUF_LOCK(&so->so_rcv);
5267 		hold_sblock = 1;
5268 	}
5269 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5270 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5271 		goto out;
5272 	}
5273 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5274 		if (so->so_error) {
5275 			error = so->so_error;
5276 			if ((in_flags & MSG_PEEK) == 0)
5277 				so->so_error = 0;
5278 			goto out;
5279 		} else {
5280 			if (so->so_rcv.sb_cc == 0) {
5281 				/* indicate EOF */
5282 				error = 0;
5283 				goto out;
5284 			}
5285 		}
5286 	}
5287 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5288 		/* we need to wait for data */
5289 		if ((so->so_rcv.sb_cc == 0) &&
5290 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5291 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5292 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5293 				/*
5294 				 * For active open side clear flags for
5295 				 * re-use passive open is blocked by
5296 				 * connect.
5297 				 */
5298 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5299 					/*
5300 					 * You were aborted, passive side
5301 					 * always hits here
5302 					 */
5303 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5304 					error = ECONNRESET;
5305 				}
5306 				so->so_state &= ~(SS_ISCONNECTING |
5307 				    SS_ISDISCONNECTING |
5308 				    SS_ISCONFIRMING |
5309 				    SS_ISCONNECTED);
5310 				if (error == 0) {
5311 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5312 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5313 						error = ENOTCONN;
5314 					}
5315 				}
5316 				goto out;
5317 			}
5318 		}
5319 		error = sbwait(&so->so_rcv);
5320 		if (error) {
5321 			goto out;
5322 		}
5323 		held_length = 0;
5324 		goto restart_nosblocks;
5325 	} else if (so->so_rcv.sb_cc == 0) {
5326 		if (so->so_error) {
5327 			error = so->so_error;
5328 			if ((in_flags & MSG_PEEK) == 0)
5329 				so->so_error = 0;
5330 		} else {
5331 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5332 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5333 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5334 					/*
5335 					 * For active open side clear flags
5336 					 * for re-use passive open is
5337 					 * blocked by connect.
5338 					 */
5339 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5340 						/*
5341 						 * You were aborted, passive
5342 						 * side always hits here
5343 						 */
5344 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5345 						error = ECONNRESET;
5346 					}
5347 					so->so_state &= ~(SS_ISCONNECTING |
5348 					    SS_ISDISCONNECTING |
5349 					    SS_ISCONFIRMING |
5350 					    SS_ISCONNECTED);
5351 					if (error == 0) {
5352 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5353 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5354 							error = ENOTCONN;
5355 						}
5356 					}
5357 					goto out;
5358 				}
5359 			}
5360 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5361 			error = EWOULDBLOCK;
5362 		}
5363 		goto out;
5364 	}
5365 	if (hold_sblock == 1) {
5366 		SOCKBUF_UNLOCK(&so->so_rcv);
5367 		hold_sblock = 0;
5368 	}
5369 	/* we possibly have data we can read */
5370 	/* sa_ignore FREED_MEMORY */
5371 	control = TAILQ_FIRST(&inp->read_queue);
5372 	if (control == NULL) {
5373 		/*
5374 		 * This could be happening since the appender did the
5375 		 * increment but as not yet did the tailq insert onto the
5376 		 * read_queue
5377 		 */
5378 		if (hold_rlock == 0) {
5379 			SCTP_INP_READ_LOCK(inp);
5380 		}
5381 		control = TAILQ_FIRST(&inp->read_queue);
5382 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5383 #ifdef INVARIANTS
5384 			panic("Huh, its non zero and nothing on control?");
5385 #endif
5386 			so->so_rcv.sb_cc = 0;
5387 		}
5388 		SCTP_INP_READ_UNLOCK(inp);
5389 		hold_rlock = 0;
5390 		goto restart;
5391 	}
5392 	if ((control->length == 0) &&
5393 	    (control->do_not_ref_stcb)) {
5394 		/*
5395 		 * Clean up code for freeing assoc that left behind a
5396 		 * pdapi.. maybe a peer in EEOR that just closed after
5397 		 * sending and never indicated a EOR.
5398 		 */
5399 		if (hold_rlock == 0) {
5400 			hold_rlock = 1;
5401 			SCTP_INP_READ_LOCK(inp);
5402 		}
5403 		control->held_length = 0;
5404 		if (control->data) {
5405 			/* Hmm there is data here .. fix */
5406 			struct mbuf *m_tmp;
5407 			int cnt = 0;
5408 
5409 			m_tmp = control->data;
5410 			while (m_tmp) {
5411 				cnt += SCTP_BUF_LEN(m_tmp);
5412 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5413 					control->tail_mbuf = m_tmp;
5414 					control->end_added = 1;
5415 				}
5416 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5417 			}
5418 			control->length = cnt;
5419 		} else {
5420 			/* remove it */
5421 			TAILQ_REMOVE(&inp->read_queue, control, next);
			/* Add back any hidden data */
5423 			sctp_free_remote_addr(control->whoFrom);
5424 			sctp_free_a_readq(stcb, control);
5425 		}
5426 		if (hold_rlock) {
5427 			hold_rlock = 0;
5428 			SCTP_INP_READ_UNLOCK(inp);
5429 		}
5430 		goto restart;
5431 	}
5432 	if ((control->length == 0) &&
5433 	    (control->end_added == 1)) {
5434 		/*
5435 		 * Do we also need to check for (control->pdapi_aborted ==
5436 		 * 1)?
5437 		 */
5438 		if (hold_rlock == 0) {
5439 			hold_rlock = 1;
5440 			SCTP_INP_READ_LOCK(inp);
5441 		}
5442 		TAILQ_REMOVE(&inp->read_queue, control, next);
5443 		if (control->data) {
5444 #ifdef INVARIANTS
5445 			panic("control->data not null but control->length == 0");
5446 #else
5447 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5448 			sctp_m_freem(control->data);
5449 			control->data = NULL;
5450 #endif
5451 		}
5452 		if (control->aux_data) {
5453 			sctp_m_free(control->aux_data);
5454 			control->aux_data = NULL;
5455 		}
5456 		sctp_free_remote_addr(control->whoFrom);
5457 		sctp_free_a_readq(stcb, control);
5458 		if (hold_rlock) {
5459 			hold_rlock = 0;
5460 			SCTP_INP_READ_UNLOCK(inp);
5461 		}
5462 		goto restart;
5463 	}
5464 	if (control->length == 0) {
5465 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5466 		    (filling_sinfo)) {
5467 			/* find a more suitable one then this */
5468 			ctl = TAILQ_NEXT(control, next);
5469 			while (ctl) {
5470 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5471 				    (ctl->some_taken ||
5472 				    (ctl->spec_flags & M_NOTIFICATION) ||
5473 				    ((ctl->do_not_ref_stcb == 0) &&
5474 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5475 				    ) {
5476 					/*-
5477 					 * If we have a different TCB next, and there is data
5478 					 * present. If we have already taken some (pdapi), OR we can
5479 					 * ref the tcb and no delivery as started on this stream, we
5480 					 * take it. Note we allow a notification on a different
5481 					 * assoc to be delivered..
5482 					 */
5483 					control = ctl;
5484 					goto found_one;
5485 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5486 					    (ctl->length) &&
5487 					    ((ctl->some_taken) ||
5488 					    ((ctl->do_not_ref_stcb == 0) &&
5489 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5490 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5491 					/*-
5492 					 * If we have the same tcb, and there is data present, and we
5493 					 * have the strm interleave feature present. Then if we have
5494 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5495 					 * not started a delivery for this stream, we can take it.
					 * Note we do NOT allow a notification on the same assoc to
5497 					 * be delivered.
5498 					 */
5499 					control = ctl;
5500 					goto found_one;
5501 				}
5502 				ctl = TAILQ_NEXT(ctl, next);
5503 			}
5504 		}
5505 		/*
5506 		 * if we reach here, not suitable replacement is available
5507 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5508 		 * into the our held count, and its time to sleep again.
5509 		 */
5510 		held_length = so->so_rcv.sb_cc;
5511 		control->held_length = so->so_rcv.sb_cc;
5512 		goto restart;
5513 	}
5514 	/* Clear the held length since there is something to read */
5515 	control->held_length = 0;
5516 	if (hold_rlock) {
5517 		SCTP_INP_READ_UNLOCK(inp);
5518 		hold_rlock = 0;
5519 	}
5520 found_one:
5521 	/*
5522 	 * If we reach here, control has a some data for us to read off.
5523 	 * Note that stcb COULD be NULL.
5524 	 */
5525 	control->some_taken++;
5526 	if (hold_sblock) {
5527 		SOCKBUF_UNLOCK(&so->so_rcv);
5528 		hold_sblock = 0;
5529 	}
5530 	stcb = control->stcb;
5531 	if (stcb) {
5532 		if ((control->do_not_ref_stcb == 0) &&
5533 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5534 			if (freecnt_applied == 0)
5535 				stcb = NULL;
5536 		} else if (control->do_not_ref_stcb == 0) {
5537 			/* you can't free it on me please */
5538 			/*
5539 			 * The lock on the socket buffer protects us so the
5540 			 * free code will stop. But since we used the
5541 			 * socketbuf lock and the sender uses the tcb_lock
5542 			 * to increment, we need to use the atomic add to
5543 			 * the refcnt
5544 			 */
5545 			if (freecnt_applied) {
5546 #ifdef INVARIANTS
5547 				panic("refcnt already incremented");
5548 #else
5549 				SCTP_PRINTF("refcnt already incremented?\n");
5550 #endif
5551 			} else {
5552 				atomic_add_int(&stcb->asoc.refcnt, 1);
5553 				freecnt_applied = 1;
5554 			}
5555 			/*
5556 			 * Setup to remember how much we have not yet told
5557 			 * the peer our rwnd has opened up. Note we grab the
5558 			 * value from the tcb from last time. Note too that
5559 			 * sack sending clears this when a sack is sent,
5560 			 * which is fine. Once we hit the rwnd_req, we then
5561 			 * will go to the sctp_user_rcvd() that will not
5562 			 * lock until it KNOWs it MUST send a WUP-SACK.
5563 			 */
5564 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5565 			stcb->freed_by_sorcv_sincelast = 0;
5566 		}
5567 	}
5568 	if (stcb &&
5569 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5570 	    control->do_not_ref_stcb == 0) {
5571 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5572 	}
5573 	/* First lets get off the sinfo and sockaddr info */
5574 	if ((sinfo) && filling_sinfo) {
5575 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5576 		nxt = TAILQ_NEXT(control, next);
5577 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5578 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5579 			struct sctp_extrcvinfo *s_extra;
5580 
5581 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5582 			if ((nxt) &&
5583 			    (nxt->length)) {
5584 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5585 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5586 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5587 				}
5588 				if (nxt->spec_flags & M_NOTIFICATION) {
5589 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5590 				}
5591 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5592 				s_extra->sreinfo_next_length = nxt->length;
5593 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5594 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5595 				if (nxt->tail_mbuf != NULL) {
5596 					if (nxt->end_added) {
5597 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5598 					}
5599 				}
5600 			} else {
5601 				/*
5602 				 * we explicitly 0 this, since the memcpy
5603 				 * got some other things beyond the older
5604 				 * sinfo_ that is on the control's structure
5605 				 * :-D
5606 				 */
5607 				nxt = NULL;
5608 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5609 				s_extra->sreinfo_next_aid = 0;
5610 				s_extra->sreinfo_next_length = 0;
5611 				s_extra->sreinfo_next_ppid = 0;
5612 				s_extra->sreinfo_next_stream = 0;
5613 			}
5614 		}
5615 		/*
5616 		 * update off the real current cum-ack, if we have an stcb.
5617 		 */
5618 		if ((control->do_not_ref_stcb == 0) && stcb)
5619 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5620 		/*
5621 		 * mask off the high bits, we keep the actual chunk bits in
5622 		 * there.
5623 		 */
5624 		sinfo->sinfo_flags &= 0x00ff;
5625 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5626 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5627 		}
5628 	}
5629 #ifdef SCTP_ASOCLOG_OF_TSNS
5630 	{
5631 		int index, newindex;
5632 		struct sctp_pcbtsn_rlog *entry;
5633 
5634 		do {
5635 			index = inp->readlog_index;
5636 			newindex = index + 1;
5637 			if (newindex >= SCTP_READ_LOG_SIZE) {
5638 				newindex = 0;
5639 			}
5640 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5641 		entry = &inp->readlog[index];
5642 		entry->vtag = control->sinfo_assoc_id;
5643 		entry->strm = control->sinfo_stream;
5644 		entry->seq = control->sinfo_ssn;
5645 		entry->sz = control->length;
5646 		entry->flgs = control->sinfo_flags;
5647 	}
5648 #endif
5649 	if (fromlen && from) {
5650 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
5651 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5652 #ifdef INET6
5653 		case AF_INET6:
5654 			((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5655 			break;
5656 #endif
5657 #ifdef INET
5658 		case AF_INET:
5659 			((struct sockaddr_in *)from)->sin_port = control->port_from;
5660 			break;
5661 #endif
5662 		default:
5663 			break;
5664 		}
5665 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5666 
5667 #if defined(INET) && defined(INET6)
5668 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5669 		    (from->sa_family == AF_INET) &&
5670 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5671 			struct sockaddr_in *sin;
5672 			struct sockaddr_in6 sin6;
5673 
5674 			sin = (struct sockaddr_in *)from;
5675 			bzero(&sin6, sizeof(sin6));
5676 			sin6.sin6_family = AF_INET6;
5677 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5678 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5679 			bcopy(&sin->sin_addr,
5680 			    &sin6.sin6_addr.s6_addr32[3],
5681 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5682 			sin6.sin6_port = sin->sin_port;
5683 			memcpy(from, &sin6, sizeof(struct sockaddr_in6));
5684 		}
5685 #endif
5686 #if defined(INET6)
5687 		{
5688 			struct sockaddr_in6 lsa6, *from6;
5689 
5690 			from6 = (struct sockaddr_in6 *)from;
5691 			sctp_recover_scope_mac(from6, (&lsa6));
5692 		}
5693 #endif
5694 	}
5695 	/* now copy out what data we can */
5696 	if (mp == NULL) {
5697 		/* copy out each mbuf in the chain up to length */
5698 get_more_data:
5699 		m = control->data;
5700 		while (m) {
5701 			/* Move out all we can */
5702 			cp_len = (int)uio->uio_resid;
5703 			my_len = (int)SCTP_BUF_LEN(m);
5704 			if (cp_len > my_len) {
5705 				/* not enough in this buf */
5706 				cp_len = my_len;
5707 			}
5708 			if (hold_rlock) {
5709 				SCTP_INP_READ_UNLOCK(inp);
5710 				hold_rlock = 0;
5711 			}
5712 			if (cp_len > 0)
5713 				error = uiomove(mtod(m, char *), cp_len, uio);
5714 			/* re-read */
5715 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5716 				goto release;
5717 			}
5718 			if ((control->do_not_ref_stcb == 0) && stcb &&
5719 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5720 				no_rcv_needed = 1;
5721 			}
5722 			if (error) {
5723 				/* error we are out of here */
5724 				goto release;
5725 			}
5726 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5727 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5728 			    ((control->end_added == 0) ||
5729 			    (control->end_added &&
5730 			    (TAILQ_NEXT(control, next) == NULL)))
5731 			    ) {
5732 				SCTP_INP_READ_LOCK(inp);
5733 				hold_rlock = 1;
5734 			}
5735 			if (cp_len == SCTP_BUF_LEN(m)) {
5736 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5737 				    (control->end_added)) {
5738 					out_flags |= MSG_EOR;
5739 					if ((control->do_not_ref_stcb == 0) &&
5740 					    (control->stcb != NULL) &&
5741 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5742 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5743 				}
5744 				if (control->spec_flags & M_NOTIFICATION) {
5745 					out_flags |= MSG_NOTIFICATION;
5746 				}
5747 				/* we ate up the mbuf */
5748 				if (in_flags & MSG_PEEK) {
5749 					/* just looking */
5750 					m = SCTP_BUF_NEXT(m);
5751 					copied_so_far += cp_len;
5752 				} else {
5753 					/* dispose of the mbuf */
5754 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5755 						sctp_sblog(&so->so_rcv,
5756 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5757 					}
5758 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5759 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5760 						sctp_sblog(&so->so_rcv,
5761 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5762 					}
5763 					copied_so_far += cp_len;
5764 					freed_so_far += cp_len;
5765 					freed_so_far += MSIZE;
5766 					atomic_subtract_int(&control->length, cp_len);
5767 					control->data = sctp_m_free(m);
5768 					m = control->data;
5769 					/*
5770 					 * been through it all, must hold sb
5771 					 * lock ok to null tail
5772 					 */
5773 					if (control->data == NULL) {
5774 #ifdef INVARIANTS
5775 						if ((control->end_added == 0) ||
5776 						    (TAILQ_NEXT(control, next) == NULL)) {
5777 							/*
5778 							 * If the end is not
5779 							 * added, OR the
5780 							 * next is NOT null
5781 							 * we MUST have the
5782 							 * lock.
5783 							 */
5784 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5785 								panic("Hmm we don't own the lock?");
5786 							}
5787 						}
5788 #endif
5789 						control->tail_mbuf = NULL;
5790 #ifdef INVARIANTS
5791 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5792 							panic("end_added, nothing left and no MSG_EOR");
5793 						}
5794 #endif
5795 					}
5796 				}
5797 			} else {
5798 				/* Do we need to trim the mbuf? */
5799 				if (control->spec_flags & M_NOTIFICATION) {
5800 					out_flags |= MSG_NOTIFICATION;
5801 				}
5802 				if ((in_flags & MSG_PEEK) == 0) {
5803 					SCTP_BUF_RESV_UF(m, cp_len);
5804 					SCTP_BUF_LEN(m) -= cp_len;
5805 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5806 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5807 					}
5808 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5809 					if ((control->do_not_ref_stcb == 0) &&
5810 					    stcb) {
5811 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5812 					}
5813 					copied_so_far += cp_len;
5814 					freed_so_far += cp_len;
5815 					freed_so_far += MSIZE;
5816 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5817 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5818 						    SCTP_LOG_SBRESULT, 0);
5819 					}
5820 					atomic_subtract_int(&control->length, cp_len);
5821 				} else {
5822 					copied_so_far += cp_len;
5823 				}
5824 			}
5825 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5826 				break;
5827 			}
5828 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5829 			    (control->do_not_ref_stcb == 0) &&
5830 			    (freed_so_far >= rwnd_req)) {
5831 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5832 			}
5833 		}		/* end while(m) */
5834 		/*
5835 		 * At this point we have looked at it all and we either have
5836 		 * a MSG_EOR/or read all the user wants... <OR>
5837 		 * control->length == 0.
5838 		 */
5839 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5840 			/* we are done with this control */
5841 			if (control->length == 0) {
5842 				if (control->data) {
5843 #ifdef INVARIANTS
5844 					panic("control->data not null at read eor?");
5845 #else
5846 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5847 					sctp_m_freem(control->data);
5848 					control->data = NULL;
5849 #endif
5850 				}
5851 		done_with_control:
5852 				if (TAILQ_NEXT(control, next) == NULL) {
5853 					/*
5854 					 * If we don't have a next we need a
5855 					 * lock, if there is a next
5856 					 * interrupt is filling ahead of us
5857 					 * and we don't need a lock to
5858 					 * remove this guy (which is the
5859 					 * head of the queue).
5860 					 */
5861 					if (hold_rlock == 0) {
5862 						SCTP_INP_READ_LOCK(inp);
5863 						hold_rlock = 1;
5864 					}
5865 				}
5866 				TAILQ_REMOVE(&inp->read_queue, control, next);
5867 				/* Add back any hiddend data */
5868 				if (control->held_length) {
5869 					held_length = 0;
5870 					control->held_length = 0;
5871 					wakeup_read_socket = 1;
5872 				}
5873 				if (control->aux_data) {
5874 					sctp_m_free(control->aux_data);
5875 					control->aux_data = NULL;
5876 				}
5877 				no_rcv_needed = control->do_not_ref_stcb;
5878 				sctp_free_remote_addr(control->whoFrom);
5879 				control->data = NULL;
5880 				sctp_free_a_readq(stcb, control);
5881 				control = NULL;
5882 				if ((freed_so_far >= rwnd_req) &&
5883 				    (no_rcv_needed == 0))
5884 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5885 
5886 			} else {
5887 				/*
5888 				 * The user did not read all of this
5889 				 * message, turn off the returned MSG_EOR
5890 				 * since we are leaving more behind on the
5891 				 * control to read.
5892 				 */
5893 #ifdef INVARIANTS
5894 				if (control->end_added &&
5895 				    (control->data == NULL) &&
5896 				    (control->tail_mbuf == NULL)) {
5897 					panic("Gak, control->length is corrupt?");
5898 				}
5899 #endif
5900 				no_rcv_needed = control->do_not_ref_stcb;
5901 				out_flags &= ~MSG_EOR;
5902 			}
5903 		}
5904 		if (out_flags & MSG_EOR) {
5905 			goto release;
5906 		}
5907 		if ((uio->uio_resid == 0) ||
5908 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5909 		    ) {
5910 			goto release;
5911 		}
5912 		/*
5913 		 * If I hit here the receiver wants more and this message is
5914 		 * NOT done (pd-api). So two questions. Can we block? if not
5915 		 * we are done. Did the user NOT set MSG_WAITALL?
5916 		 */
5917 		if (block_allowed == 0) {
5918 			goto release;
5919 		}
5920 		/*
5921 		 * We need to wait for more data a few things: - We don't
5922 		 * sbunlock() so we don't get someone else reading. - We
5923 		 * must be sure to account for the case where what is added
5924 		 * is NOT to our control when we wakeup.
5925 		 */
5926 
5927 		/*
5928 		 * Do we need to tell the transport a rwnd update might be
5929 		 * needed before we go to sleep?
5930 		 */
5931 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5932 		    ((freed_so_far >= rwnd_req) &&
5933 		    (control->do_not_ref_stcb == 0) &&
5934 		    (no_rcv_needed == 0))) {
5935 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5936 		}
5937 wait_some_more:
5938 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5939 			goto release;
5940 		}
5941 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5942 			goto release;
5943 
5944 		if (hold_rlock == 1) {
5945 			SCTP_INP_READ_UNLOCK(inp);
5946 			hold_rlock = 0;
5947 		}
5948 		if (hold_sblock == 0) {
5949 			SOCKBUF_LOCK(&so->so_rcv);
5950 			hold_sblock = 1;
5951 		}
5952 		if ((copied_so_far) && (control->length == 0) &&
5953 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5954 			goto release;
5955 		}
5956 		if (so->so_rcv.sb_cc <= control->held_length) {
5957 			error = sbwait(&so->so_rcv);
5958 			if (error) {
5959 				goto release;
5960 			}
5961 			control->held_length = 0;
5962 		}
5963 		if (hold_sblock) {
5964 			SOCKBUF_UNLOCK(&so->so_rcv);
5965 			hold_sblock = 0;
5966 		}
5967 		if (control->length == 0) {
5968 			/* still nothing here */
5969 			if (control->end_added == 1) {
5970 				/* he aborted, or is done i.e.did a shutdown */
5971 				out_flags |= MSG_EOR;
5972 				if (control->pdapi_aborted) {
5973 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5974 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5975 
5976 					out_flags |= MSG_TRUNC;
5977 				} else {
5978 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5979 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5980 				}
5981 				goto done_with_control;
5982 			}
5983 			if (so->so_rcv.sb_cc > held_length) {
5984 				control->held_length = so->so_rcv.sb_cc;
5985 				held_length = 0;
5986 			}
5987 			goto wait_some_more;
5988 		} else if (control->data == NULL) {
5989 			/*
5990 			 * we must re-sync since data is probably being
5991 			 * added
5992 			 */
5993 			SCTP_INP_READ_LOCK(inp);
5994 			if ((control->length > 0) && (control->data == NULL)) {
5995 				/*
5996 				 * big trouble.. we have the lock and its
5997 				 * corrupt?
5998 				 */
5999 #ifdef INVARIANTS
6000 				panic("Impossible data==NULL length !=0");
6001 #endif
6002 				out_flags |= MSG_EOR;
6003 				out_flags |= MSG_TRUNC;
6004 				control->length = 0;
6005 				SCTP_INP_READ_UNLOCK(inp);
6006 				goto done_with_control;
6007 			}
6008 			SCTP_INP_READ_UNLOCK(inp);
6009 			/* We will fall around to get more data */
6010 		}
6011 		goto get_more_data;
6012 	} else {
6013 		/*-
6014 		 * Give caller back the mbuf chain,
6015 		 * store in uio_resid the length
6016 		 */
6017 		wakeup_read_socket = 0;
6018 		if ((control->end_added == 0) ||
6019 		    (TAILQ_NEXT(control, next) == NULL)) {
6020 			/* Need to get rlock */
6021 			if (hold_rlock == 0) {
6022 				SCTP_INP_READ_LOCK(inp);
6023 				hold_rlock = 1;
6024 			}
6025 		}
6026 		if (control->end_added) {
6027 			out_flags |= MSG_EOR;
6028 			if ((control->do_not_ref_stcb == 0) &&
6029 			    (control->stcb != NULL) &&
6030 			    ((control->spec_flags & M_NOTIFICATION) == 0))
6031 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
6032 		}
6033 		if (control->spec_flags & M_NOTIFICATION) {
6034 			out_flags |= MSG_NOTIFICATION;
6035 		}
6036 		uio->uio_resid = control->length;
6037 		*mp = control->data;
6038 		m = control->data;
6039 		while (m) {
6040 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6041 				sctp_sblog(&so->so_rcv,
6042 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
6043 			}
6044 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6045 			freed_so_far += SCTP_BUF_LEN(m);
6046 			freed_so_far += MSIZE;
6047 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6048 				sctp_sblog(&so->so_rcv,
6049 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6050 			}
6051 			m = SCTP_BUF_NEXT(m);
6052 		}
6053 		control->data = control->tail_mbuf = NULL;
6054 		control->length = 0;
6055 		if (out_flags & MSG_EOR) {
6056 			/* Done with this control */
6057 			goto done_with_control;
6058 		}
6059 	}
6060 release:
6061 	if (hold_rlock == 1) {
6062 		SCTP_INP_READ_UNLOCK(inp);
6063 		hold_rlock = 0;
6064 	}
6065 	if (hold_sblock == 1) {
6066 		SOCKBUF_UNLOCK(&so->so_rcv);
6067 		hold_sblock = 0;
6068 	}
6069 	sbunlock(&so->so_rcv);
6070 	sockbuf_lock = 0;
6071 
6072 release_unlocked:
6073 	if (hold_sblock) {
6074 		SOCKBUF_UNLOCK(&so->so_rcv);
6075 		hold_sblock = 0;
6076 	}
6077 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6078 		if ((freed_so_far >= rwnd_req) &&
6079 		    (control && (control->do_not_ref_stcb == 0)) &&
6080 		    (no_rcv_needed == 0))
6081 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6082 	}
6083 out:
6084 	if (msg_flags) {
6085 		*msg_flags = out_flags;
6086 	}
6087 	if (((out_flags & MSG_EOR) == 0) &&
6088 	    ((in_flags & MSG_PEEK) == 0) &&
6089 	    (sinfo) &&
6090 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6091 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6092 		struct sctp_extrcvinfo *s_extra;
6093 
6094 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6095 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6096 	}
6097 	if (hold_rlock == 1) {
6098 		SCTP_INP_READ_UNLOCK(inp);
6099 	}
6100 	if (hold_sblock) {
6101 		SOCKBUF_UNLOCK(&so->so_rcv);
6102 	}
6103 	if (sockbuf_lock) {
6104 		sbunlock(&so->so_rcv);
6105 	}
6106 	if (freecnt_applied) {
6107 		/*
6108 		 * The lock on the socket buffer protects us so the free
6109 		 * code will stop. But since we used the socketbuf lock and
6110 		 * the sender uses the tcb_lock to increment, we need to use
6111 		 * the atomic add to the refcnt.
6112 		 */
6113 		if (stcb == NULL) {
6114 #ifdef INVARIANTS
6115 			panic("stcb for refcnt has gone NULL?");
6116 			goto stage_left;
6117 #else
6118 			goto stage_left;
6119 #endif
6120 		}
6121 		atomic_add_int(&stcb->asoc.refcnt, -1);
6122 		/* Save the value back for next time */
6123 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6124 	}
6125 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6126 		if (stcb) {
6127 			sctp_misc_ints(SCTP_SORECV_DONE,
6128 			    freed_so_far,
6129 			    ((uio) ? (slen - uio->uio_resid) : slen),
6130 			    stcb->asoc.my_rwnd,
6131 			    so->so_rcv.sb_cc);
6132 		} else {
6133 			sctp_misc_ints(SCTP_SORECV_DONE,
6134 			    freed_so_far,
6135 			    ((uio) ? (slen - uio->uio_resid) : slen),
6136 			    0,
6137 			    so->so_rcv.sb_cc);
6138 		}
6139 	}
6140 stage_left:
6141 	if (wakeup_read_socket) {
6142 		sctp_sorwakeup(inp, so);
6143 	}
6144 	return (error);
6145 }
6146 
6147 
6148 #ifdef SCTP_MBUF_LOGGING
6149 struct mbuf *
6150 sctp_m_free(struct mbuf *m)
6151 {
6152 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6153 		if (SCTP_BUF_IS_EXTENDED(m)) {
6154 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6155 		}
6156 	}
6157 	return (m_free(m));
6158 }
6159 
6160 void
6161 sctp_m_freem(struct mbuf *mb)
6162 {
6163 	while (mb != NULL)
6164 		mb = sctp_m_free(mb);
6165 }
6166 
6167 #endif
6168 
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.  The work is not done
	 * inline: a work item is queued for the address work-queue
	 * iterator and its timer is kicked.  Returns 0 on success,
	 * EADDRNOTAVAIL if the address is unknown in this VRF, or
	 * ENOMEM if no work item could be allocated.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	/* The address must belong to a local interface in this VRF. */
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa while it sits on the work queue. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	/* Kick the address work-queue timer so the request is processed. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6215 
6216 
6217 int
6218 sctp_soreceive(struct socket *so,
6219     struct sockaddr **psa,
6220     struct uio *uio,
6221     struct mbuf **mp0,
6222     struct mbuf **controlp,
6223     int *flagsp)
6224 {
6225 	int error, fromlen;
6226 	uint8_t sockbuf[256];
6227 	struct sockaddr *from;
6228 	struct sctp_extrcvinfo sinfo;
6229 	int filling_sinfo = 1;
6230 	struct sctp_inpcb *inp;
6231 
6232 	inp = (struct sctp_inpcb *)so->so_pcb;
6233 	/* pickup the assoc we are reading from */
6234 	if (inp == NULL) {
6235 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6236 		return (EINVAL);
6237 	}
6238 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6239 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6240 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6241 	    (controlp == NULL)) {
6242 		/* user does not want the sndrcv ctl */
6243 		filling_sinfo = 0;
6244 	}
6245 	if (psa) {
6246 		from = (struct sockaddr *)sockbuf;
6247 		fromlen = sizeof(sockbuf);
6248 		from->sa_len = 0;
6249 	} else {
6250 		from = NULL;
6251 		fromlen = 0;
6252 	}
6253 
6254 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6255 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6256 	if ((controlp) && (filling_sinfo)) {
6257 		/* copy back the sinfo in a CMSG format */
6258 		if (filling_sinfo)
6259 			*controlp = sctp_build_ctl_nchunk(inp,
6260 			    (struct sctp_sndrcvinfo *)&sinfo);
6261 		else
6262 			*controlp = NULL;
6263 	}
6264 	if (psa) {
6265 		/* copy back the address info */
6266 		if (from && from->sa_len) {
6267 			*psa = sodupsockaddr(from, M_NOWAIT);
6268 		} else {
6269 			*psa = NULL;
6270 		}
6271 	}
6272 	return (error);
6273 }
6274 
6275 
6276 
6277 
6278 
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add up to totaddr remote addresses, packed back-to-back in the
	 * buffer at addr, to the association stcb.  Returns the number of
	 * addresses successfully added.  On failure *error is set to
	 * EINVAL/ENOBUFS and the association has already been torn down
	 * via sctp_free_assoc() -- the caller must not touch stcb then.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast destinations. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast destinations. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * Unknown family: skipped.  Note sa then advances by
			 * the incr of the previous entry (0 on the first
			 * iteration), so a leading unknown entry stalls here
			 * until the loop count runs out.
			 */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6354 
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    int *totaddr, int *num_v4, int *num_v6, int *error,
    int limit, int *bad_addr)
{
	/*
	 * Validate and count a packed list of *totaddr sockaddrs for
	 * sctp_connectx().  Fills in *num_v4/*num_v6, trims *totaddr at
	 * the first unknown family or when the buffer limit would be
	 * exceeded, and sets *error/*bad_addr on malformed entries.
	 * Returns an existing association if any address already matches
	 * one (with the inp ref still held), otherwise NULL.
	 */
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	size_t incr, at, i;

	at = incr = 0;
	sa = addr;

	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < (size_t)*totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			(*num_v4) += 1;
			incr = sizeof(struct sockaddr_in);
			/* sa_len must match the family's sockaddr size. */
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			break;
#endif
#ifdef INET6
		case AF_INET6:
			{
				struct sockaddr_in6 *sin6;

				sin6 = (struct sockaddr_in6 *)sa;
				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
					/* Must be non-mapped for connectx */
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				(*num_v6) += 1;
				incr = sizeof(struct sockaddr_in6);
				if (sa->sa_len != incr) {
					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
					*error = EINVAL;
					*bad_addr = 1;
					return (NULL);
				}
				break;
			}
#endif
		default:
			/* Unknown family terminates the list here. */
			*totaddr = i;
			/* we are done */
			break;
		}
		if (i == (size_t)*totaddr) {
			break;
		}
		/* Hold a ref across the lookup; drop it if no assoc found. */
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* Already have or am bring up an association */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		/* Stop before walking past the caller-supplied buffer. */
		if ((at + incr) > (size_t)limit) {
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}
6431 
6432 /*
6433  * sctp_bindx(ADD) for one address.
6434  * assumes all arguments are valid/checked by caller.
6435  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * Add one local address to the endpoint (sctp_bindx ADD).
	 * Validates the address against the socket's family/flags, then
	 * either performs an initial bind (unbound endpoint) or adds the
	 * address via sctp_addr_mgmt_ep_sa().  Errors are reported
	 * through *error; 0 means success.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	/* Scratch space for converting a v4-mapped v6 address to v4. */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Handle the mapped address as plain IPv4 below. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* First address on an unbound endpoint: plain bind. */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* Does some other endpoint already own this address:port? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6559 
6560 /*
6561  * sctp_bindx(DELETE) for one address.
6562  * assumes all arguments are valid/checked by caller.
6563  */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * Remove one local address from the endpoint (sctp_bindx DELETE).
	 * Performs the same family/flag validation as the ADD path, then
	 * deletes the address via sctp_addr_mgmt_ep_sa().  Errors are
	 * reported through *error; 0 means success.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	/* Scratch space for converting a v4-mapped v6 address to v4. */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6)
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Handle the mapped address as plain IPv4 below. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6644 
6645 /*
6646  * returns the valid local address count for an assoc, taking into account
6647  * all scoping rules
6648  */
/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/*
	 * Which address families may count: v6 sockets allow v6, and
	 * also v4 unless the socket is v6-only; v4 sockets allow only v4.
	 */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad
									 * link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6782 
6783 #if defined(SCTP_LOCAL_TRACE_BUF)
6784 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Record one entry in the circular in-kernel trace buffer.
	 * A lock-free CAS loop claims a slot index, so concurrent
	 * callers never write the same entry (the entry fields
	 * themselves are filled without a lock).
	 */
	uint32_t saveindex, newindex;

	/* Advance the shared index, wrapping at SCTP_MAX_LOGGING_SIZE. */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* Map the wrap sentinel back onto slot 0. */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6810 
6811 #endif
6812 /* XXX: Remove the #ifdef after tunneling over IPv6 works also on FreeBSD. */
6813 #ifdef INET
/* We will need to add support
 * to bind the ports and such here
 * so we can do UDP tunneling. In
 * the meantime, unsupported cases
 * return an error.
 */
6819 #include <netinet/udp.h>
6820 #include <netinet/udp_var.h>
6821 #include <sys/proc.h>
6822 #ifdef INET6
6823 #include <netinet6/sctp6_var.h>
6824 #endif
6825 
6826 static void
6827 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6828 {
6829 	struct ip *iph;
6830 	struct mbuf *sp, *last;
6831 	struct udphdr *uhdr;
6832 	uint16_t port = 0;
6833 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6834 
6835 	/*
6836 	 * Split out the mbuf chain. Leave the IP header in m, place the
6837 	 * rest in the sp.
6838 	 */
6839 	if ((m->m_flags & M_PKTHDR) == 0) {
6840 		/* Can't handle one that is not a pkt hdr */
6841 		goto out;
6842 	}
6843 	/* pull the src port */
6844 	iph = mtod(m, struct ip *);
6845 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6846 
6847 	port = uhdr->uh_sport;
6848 	sp = m_split(m, off, M_DONTWAIT);
6849 	if (sp == NULL) {
6850 		/* Gak, drop packet, we can't do a split */
6851 		goto out;
6852 	}
6853 	if (sp->m_pkthdr.len < header_size) {
6854 		/* Gak, packet can't have an SCTP header in it - to small */
6855 		m_freem(sp);
6856 		goto out;
6857 	}
6858 	/* ok now pull up the UDP header and SCTP header together */
6859 	sp = m_pullup(sp, header_size);
6860 	if (sp == NULL) {
6861 		/* Gak pullup failed */
6862 		goto out;
6863 	}
6864 	/* trim out the UDP header */
6865 	m_adj(sp, sizeof(struct udphdr));
6866 
6867 	/* Now reconstruct the mbuf chain */
6868 	/* 1) find last one */
6869 	last = m;
6870 	while (last->m_next != NULL) {
6871 		last = last->m_next;
6872 	}
6873 	last->m_next = sp;
6874 	m->m_pkthdr.len += sp->m_pkthdr.len;
6875 	last = m;
6876 	while (last != NULL) {
6877 		last = last->m_next;
6878 	}
6879 	/* Now its ready for sctp_input or sctp6_input */
6880 	iph = mtod(m, struct ip *);
6881 	switch (iph->ip_v) {
6882 #ifdef INET
6883 	case IPVERSION:
6884 		{
6885 			uint16_t len;
6886 
6887 			/* its IPv4 */
6888 			len = SCTP_GET_IPV4_LENGTH(iph);
6889 			len -= sizeof(struct udphdr);
6890 			SCTP_GET_IPV4_LENGTH(iph) = len;
6891 			sctp_input_with_port(m, off, port);
6892 			break;
6893 		}
6894 #endif
6895 #ifdef INET6
6896 	case IPV6_VERSION >> 4:
6897 		{
6898 			/* its IPv6 - NOT supported */
6899 			goto out;
6900 			break;
6901 
6902 		}
6903 #endif
6904 	default:
6905 		{
6906 			m_freem(m);
6907 			break;
6908 		}
6909 	}
6910 	return;
6911 out:
6912 	m_freem(m);
6913 }
6914 
6915 void
6916 sctp_over_udp_stop(void)
6917 {
6918 	struct socket *sop;
6919 
6920 	/*
6921 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6922 	 * for writting!
6923 	 */
6924 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6925 		/* Nothing to do */
6926 		return;
6927 	}
6928 	sop = SCTP_BASE_INFO(udp_tun_socket);
6929 	soclose(sop);
6930 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6931 }
6932 
int
sctp_over_udp_start(void)
{
	/*
	 * Create and bind the kernel UDP socket used for SCTP-over-UDP
	 * tunneling, and install sctp_recv_udp_tunneled_packet() as its
	 * receive hook.  Returns 0 on success or an errno value.
	 */
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up, we can't get the port */
exit_stage_left:
		/* sctp_over_udp_stop() closes the socket stored above. */
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_upd_tunneled_packet().
	 */
	return (0);
}
6986 
6987 #endif
6988