xref: /freebsd/sys/netinet/sctputil.c (revision 884a2a699669ec61e2366e3e358342dbc94be24a)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *   this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *   the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #endif
45 #include <netinet/sctp_header.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_uio.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_bsd_addr.h>
53 
54 
55 #ifndef KTR_SCTP
56 #define KTR_SCTP KTR_SUBSYS
57 #endif
58 
59 extern struct sctp_cc_functions sctp_cc_functions[];
60 extern struct sctp_ss_functions sctp_ss_functions[];
61 
62 void
63 sctp_sblog(struct sockbuf *sb,
64     struct sctp_tcb *stcb, int from, int incr)
65 {
66 	struct sctp_cwnd_log sctp_clog;
67 
68 	sctp_clog.x.sb.stcb = stcb;
69 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
70 	if (stcb)
71 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
72 	else
73 		sctp_clog.x.sb.stcb_sbcc = 0;
74 	sctp_clog.x.sb.incr = incr;
75 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
76 	    SCTP_LOG_EVENT_SB,
77 	    from,
78 	    sctp_clog.x.misc.log1,
79 	    sctp_clog.x.misc.log2,
80 	    sctp_clog.x.misc.log3,
81 	    sctp_clog.x.misc.log4);
82 }
83 
84 void
85 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
86 {
87 	struct sctp_cwnd_log sctp_clog;
88 
89 	sctp_clog.x.close.inp = (void *)inp;
90 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
91 	if (stcb) {
92 		sctp_clog.x.close.stcb = (void *)stcb;
93 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
94 	} else {
95 		sctp_clog.x.close.stcb = 0;
96 		sctp_clog.x.close.state = 0;
97 	}
98 	sctp_clog.x.close.loc = loc;
99 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
100 	    SCTP_LOG_EVENT_CLOSE,
101 	    0,
102 	    sctp_clog.x.misc.log1,
103 	    sctp_clog.x.misc.log2,
104 	    sctp_clog.x.misc.log3,
105 	    sctp_clog.x.misc.log4);
106 }
107 
108 
void
rto_logging(struct sctp_nets *net, int from)
{
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace an RTT observation for the given destination.  The
	 * record is zeroed first so the x.misc overlay read below never
	 * exposes stale stack bytes.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	/* Scales net->rtt down by 1000 — presumably us -> ms; TODO confirm units. */
	sctp_clog.x.rto.rtt = net->rtt / 1000;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
126 
127 void
128 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
129 {
130 	struct sctp_cwnd_log sctp_clog;
131 
132 	sctp_clog.x.strlog.stcb = stcb;
133 	sctp_clog.x.strlog.n_tsn = tsn;
134 	sctp_clog.x.strlog.n_sseq = sseq;
135 	sctp_clog.x.strlog.e_tsn = 0;
136 	sctp_clog.x.strlog.e_sseq = 0;
137 	sctp_clog.x.strlog.strm = stream;
138 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
139 	    SCTP_LOG_EVENT_STRM,
140 	    from,
141 	    sctp_clog.x.misc.log1,
142 	    sctp_clog.x.misc.log2,
143 	    sctp_clog.x.misc.log3,
144 	    sctp_clog.x.misc.log4);
145 
146 }
147 
148 void
149 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
150 {
151 	struct sctp_cwnd_log sctp_clog;
152 
153 	sctp_clog.x.nagle.stcb = (void *)stcb;
154 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
155 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
156 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
157 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
158 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
159 	    SCTP_LOG_EVENT_NAGLE,
160 	    action,
161 	    sctp_clog.x.misc.log1,
162 	    sctp_clog.x.misc.log2,
163 	    sctp_clog.x.misc.log3,
164 	    sctp_clog.x.misc.log4);
165 }
166 
167 
168 void
169 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
170 {
171 	struct sctp_cwnd_log sctp_clog;
172 
173 	sctp_clog.x.sack.cumack = cumack;
174 	sctp_clog.x.sack.oldcumack = old_cumack;
175 	sctp_clog.x.sack.tsn = tsn;
176 	sctp_clog.x.sack.numGaps = gaps;
177 	sctp_clog.x.sack.numDups = dups;
178 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
179 	    SCTP_LOG_EVENT_SACK,
180 	    from,
181 	    sctp_clog.x.misc.log1,
182 	    sctp_clog.x.misc.log2,
183 	    sctp_clog.x.misc.log3,
184 	    sctp_clog.x.misc.log4);
185 }
186 
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace mapping-array state: base TSN of the map, the
	 * cumulative-ack point, and the highest TSN seen.  Zeroed first
	 * so the x.misc overlay never logs stale stack bytes.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}
204 
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
    int from)
{
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Trace a fast-retransmit decision: the largest TSN sacked, the
	 * largest newly-acked TSN, and the TSN under consideration.
	 * Zeroed first so the x.misc overlay never logs stale stack
	 * bytes.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
224 
225 
226 void
227 sctp_log_mb(struct mbuf *m, int from)
228 {
229 	struct sctp_cwnd_log sctp_clog;
230 
231 	sctp_clog.x.mb.mp = m;
232 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
233 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
234 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
235 	if (SCTP_BUF_IS_EXTENDED(m)) {
236 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
237 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
238 	} else {
239 		sctp_clog.x.mb.ext = 0;
240 		sctp_clog.x.mb.refcnt = 0;
241 	}
242 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
243 	    SCTP_LOG_EVENT_MBUF,
244 	    from,
245 	    sctp_clog.x.misc.log1,
246 	    sctp_clog.x.misc.log2,
247 	    sctp_clog.x.misc.log3,
248 	    sctp_clog.x.misc.log4);
249 }
250 
251 
252 void
253 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
254     int from)
255 {
256 	struct sctp_cwnd_log sctp_clog;
257 
258 	if (control == NULL) {
259 		SCTP_PRINTF("Gak log of NULL?\n");
260 		return;
261 	}
262 	sctp_clog.x.strlog.stcb = control->stcb;
263 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
264 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
265 	sctp_clog.x.strlog.strm = control->sinfo_stream;
266 	if (poschk != NULL) {
267 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
268 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
269 	} else {
270 		sctp_clog.x.strlog.e_tsn = 0;
271 		sctp_clog.x.strlog.e_sseq = 0;
272 	}
273 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
274 	    SCTP_LOG_EVENT_STRM,
275 	    from,
276 	    sctp_clog.x.misc.log1,
277 	    sctp_clog.x.misc.log2,
278 	    sctp_clog.x.misc.log3,
279 	    sctp_clog.x.misc.log4);
280 
281 }
282 
283 void
284 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
285 {
286 	struct sctp_cwnd_log sctp_clog;
287 
288 	sctp_clog.x.cwnd.net = net;
289 	if (stcb->asoc.send_queue_cnt > 255)
290 		sctp_clog.x.cwnd.cnt_in_send = 255;
291 	else
292 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
293 	if (stcb->asoc.stream_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_str = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
297 
298 	if (net) {
299 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
300 		sctp_clog.x.cwnd.inflight = net->flight_size;
301 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
302 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
303 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
304 	}
305 	if (SCTP_CWNDLOG_PRESEND == from) {
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
307 	}
308 	sctp_clog.x.cwnd.cwnd_augment = augment;
309 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
310 	    SCTP_LOG_EVENT_CWND,
311 	    from,
312 	    sctp_clog.x.misc.log1,
313 	    sctp_clog.x.misc.log2,
314 	    sctp_clog.x.misc.log3,
315 	    sctp_clog.x.misc.log4);
316 
317 }
318 
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	/*
	 * Snapshot the ownership state of every SCTP-relevant lock for
	 * the given endpoint/association and emit it to the KTR trace.
	 * Both inp and stcb may be NULL; unknown states are recorded as
	 * SCTP_LOCK_UNKNOWN.
	 */
	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Global endpoint-info lock (write-owned check). */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both record
		 * so_rcv.sb_mtx — verify sock_lock was not meant to use the
		 * socket's own lock.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
363 
364 void
365 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
366 {
367 	struct sctp_cwnd_log sctp_clog;
368 
369 	memset(&sctp_clog, 0, sizeof(sctp_clog));
370 	sctp_clog.x.cwnd.net = net;
371 	sctp_clog.x.cwnd.cwnd_new_value = error;
372 	sctp_clog.x.cwnd.inflight = net->flight_size;
373 	sctp_clog.x.cwnd.cwnd_augment = burst;
374 	if (stcb->asoc.send_queue_cnt > 255)
375 		sctp_clog.x.cwnd.cnt_in_send = 255;
376 	else
377 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
378 	if (stcb->asoc.stream_queue_cnt > 255)
379 		sctp_clog.x.cwnd.cnt_in_str = 255;
380 	else
381 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
382 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
383 	    SCTP_LOG_EVENT_MAXBURST,
384 	    from,
385 	    sctp_clog.x.misc.log1,
386 	    sctp_clog.x.misc.log2,
387 	    sctp_clog.x.misc.log3,
388 	    sctp_clog.x.misc.log4);
389 
390 }
391 
392 void
393 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
394 {
395 	struct sctp_cwnd_log sctp_clog;
396 
397 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
398 	sctp_clog.x.rwnd.send_size = snd_size;
399 	sctp_clog.x.rwnd.overhead = overhead;
400 	sctp_clog.x.rwnd.new_rwnd = 0;
401 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
402 	    SCTP_LOG_EVENT_RWND,
403 	    from,
404 	    sctp_clog.x.misc.log1,
405 	    sctp_clog.x.misc.log2,
406 	    sctp_clog.x.misc.log3,
407 	    sctp_clog.x.misc.log4);
408 }
409 
410 void
411 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
412 {
413 	struct sctp_cwnd_log sctp_clog;
414 
415 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
416 	sctp_clog.x.rwnd.send_size = flight_size;
417 	sctp_clog.x.rwnd.overhead = overhead;
418 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
419 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
420 	    SCTP_LOG_EVENT_RWND,
421 	    from,
422 	    sctp_clog.x.misc.log1,
423 	    sctp_clog.x.misc.log2,
424 	    sctp_clog.x.misc.log3,
425 	    sctp_clog.x.misc.log4);
426 }
427 
428 void
429 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
430 {
431 	struct sctp_cwnd_log sctp_clog;
432 
433 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
434 	sctp_clog.x.mbcnt.size_change = book;
435 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
436 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_EVENT_MBCNT,
439 	    from,
440 	    sctp_clog.x.misc.log1,
441 	    sctp_clog.x.misc.log2,
442 	    sctp_clog.x.misc.log3,
443 	    sctp_clog.x.misc.log4);
444 
445 }
446 
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	/* Emit four caller-supplied words directly into the KTR trace. */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
455 
456 void
457 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
458 {
459 	struct sctp_cwnd_log sctp_clog;
460 
461 	sctp_clog.x.wake.stcb = (void *)stcb;
462 	sctp_clog.x.wake.wake_cnt = wake_cnt;
463 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
464 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
465 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
466 
467 	if (stcb->asoc.stream_queue_cnt < 0xff)
468 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
469 	else
470 		sctp_clog.x.wake.stream_qcnt = 0xff;
471 
472 	if (stcb->asoc.chunks_on_out_queue < 0xff)
473 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
474 	else
475 		sctp_clog.x.wake.chunks_on_oque = 0xff;
476 
477 	sctp_clog.x.wake.sctpflags = 0;
478 	/* set in the defered mode stuff */
479 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
480 		sctp_clog.x.wake.sctpflags |= 1;
481 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
482 		sctp_clog.x.wake.sctpflags |= 2;
483 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
484 		sctp_clog.x.wake.sctpflags |= 4;
485 	/* what about the sb */
486 	if (stcb->sctp_socket) {
487 		struct socket *so = stcb->sctp_socket;
488 
489 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
490 	} else {
491 		sctp_clog.x.wake.sbflags = 0xff;
492 	}
493 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
494 	    SCTP_LOG_EVENT_WAKE,
495 	    from,
496 	    sctp_clog.x.misc.log1,
497 	    sctp_clog.x.misc.log2,
498 	    sctp_clog.x.misc.log3,
499 	    sctp_clog.x.misc.log4);
500 
501 }
502 
503 void
504 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
505 {
506 	struct sctp_cwnd_log sctp_clog;
507 
508 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
509 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
510 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
511 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
512 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
513 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
514 	sctp_clog.x.blk.sndlen = sendlen;
515 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
516 	    SCTP_LOG_EVENT_BLOCK,
517 	    from,
518 	    sctp_clog.x.misc.log1,
519 	    sctp_clog.x.misc.log2,
520 	    sctp_clog.x.misc.log3,
521 	    sctp_clog.x.misc.log4);
522 
523 }
524 
/*
 * Stub for the SCTP_GET_STAT_LOG socket option: statistics are read
 * out-of-band via the KTR trace instead, so this always succeeds
 * without touching optval/optsize.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
531 
532 #ifdef SCTP_AUDITING_ENABLED
/* Circular trace of (event, detail) byte pairs; see sctp_audit_log(). */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
/* Next write slot in sctp_audit_data; wraps to 0 at SCTP_AUDIT_SIZE. */
static int sctp_audit_indx = 0;
535 
536 static
537 void
538 sctp_print_audit_report(void)
539 {
540 	int i;
541 	int cnt;
542 
543 	cnt = 0;
544 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
545 		if ((sctp_audit_data[i][0] == 0xe0) &&
546 		    (sctp_audit_data[i][1] == 0x01)) {
547 			cnt = 0;
548 			SCTP_PRINTF("\n");
549 		} else if (sctp_audit_data[i][0] == 0xf0) {
550 			cnt = 0;
551 			SCTP_PRINTF("\n");
552 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
553 		    (sctp_audit_data[i][1] == 0x01)) {
554 			SCTP_PRINTF("\n");
555 			cnt = 0;
556 		}
557 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
558 		    (uint32_t) sctp_audit_data[i][1]);
559 		cnt++;
560 		if ((cnt % 14) == 0)
561 			SCTP_PRINTF("\n");
562 	}
563 	for (i = 0; i < sctp_audit_indx; i++) {
564 		if ((sctp_audit_data[i][0] == 0xe0) &&
565 		    (sctp_audit_data[i][1] == 0x01)) {
566 			cnt = 0;
567 			SCTP_PRINTF("\n");
568 		} else if (sctp_audit_data[i][0] == 0xf0) {
569 			cnt = 0;
570 			SCTP_PRINTF("\n");
571 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
572 		    (sctp_audit_data[i][1] == 0x01)) {
573 			SCTP_PRINTF("\n");
574 			cnt = 0;
575 		}
576 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
577 		    (uint32_t) sctp_audit_data[i][1]);
578 		cnt++;
579 		if ((cnt % 14) == 0)
580 			SCTP_PRINTF("\n");
581 	}
582 	SCTP_PRINTF("\n");
583 }
584 
585 void
586 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
587     struct sctp_nets *net)
588 {
589 	int resend_cnt, tot_out, rep, tot_book_cnt;
590 	struct sctp_nets *lnet;
591 	struct sctp_tmit_chunk *chk;
592 
593 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
594 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
595 	sctp_audit_indx++;
596 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
597 		sctp_audit_indx = 0;
598 	}
599 	if (inp == NULL) {
600 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
601 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
602 		sctp_audit_indx++;
603 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
604 			sctp_audit_indx = 0;
605 		}
606 		return;
607 	}
608 	if (stcb == NULL) {
609 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
610 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
611 		sctp_audit_indx++;
612 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
613 			sctp_audit_indx = 0;
614 		}
615 		return;
616 	}
617 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
618 	sctp_audit_data[sctp_audit_indx][1] =
619 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
620 	sctp_audit_indx++;
621 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
622 		sctp_audit_indx = 0;
623 	}
624 	rep = 0;
625 	tot_book_cnt = 0;
626 	resend_cnt = tot_out = 0;
627 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
628 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
629 			resend_cnt++;
630 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
631 			tot_out += chk->book_size;
632 			tot_book_cnt++;
633 		}
634 	}
635 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
636 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
637 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
638 		sctp_audit_indx++;
639 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
640 			sctp_audit_indx = 0;
641 		}
642 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
643 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
644 		rep = 1;
645 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
646 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
647 		sctp_audit_data[sctp_audit_indx][1] =
648 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
649 		sctp_audit_indx++;
650 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
651 			sctp_audit_indx = 0;
652 		}
653 	}
654 	if (tot_out != stcb->asoc.total_flight) {
655 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
656 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
657 		sctp_audit_indx++;
658 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
659 			sctp_audit_indx = 0;
660 		}
661 		rep = 1;
662 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
663 		    (int)stcb->asoc.total_flight);
664 		stcb->asoc.total_flight = tot_out;
665 	}
666 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
667 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
668 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
669 		sctp_audit_indx++;
670 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
671 			sctp_audit_indx = 0;
672 		}
673 		rep = 1;
674 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);
675 
676 		stcb->asoc.total_flight_count = tot_book_cnt;
677 	}
678 	tot_out = 0;
679 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
680 		tot_out += lnet->flight_size;
681 	}
682 	if (tot_out != stcb->asoc.total_flight) {
683 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
684 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
685 		sctp_audit_indx++;
686 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
687 			sctp_audit_indx = 0;
688 		}
689 		rep = 1;
690 		SCTP_PRINTF("real flight:%d net total was %d\n",
691 		    stcb->asoc.total_flight, tot_out);
692 		/* now corrective action */
693 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
694 
695 			tot_out = 0;
696 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
697 				if ((chk->whoTo == lnet) &&
698 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
699 					tot_out += chk->book_size;
700 				}
701 			}
702 			if (lnet->flight_size != tot_out) {
703 				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
704 				    lnet, lnet->flight_size,
705 				    tot_out);
706 				lnet->flight_size = tot_out;
707 			}
708 		}
709 	}
710 	if (rep) {
711 		sctp_print_audit_report();
712 	}
713 }
714 
715 void
716 sctp_audit_log(uint8_t ev, uint8_t fd)
717 {
718 
719 	sctp_audit_data[sctp_audit_indx][0] = ev;
720 	sctp_audit_data[sctp_audit_indx][1] = fd;
721 	sctp_audit_indx++;
722 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
723 		sctp_audit_indx = 0;
724 	}
725 }
726 
727 #endif
728 
729 /*
730  * sctp_stop_timers_for_shutdown() should be called
731  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
732  * state to make sure that all timers are stopped.
733  */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	/* Association-wide timers. */
	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	/* Per-destination timers. */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
	}
}
753 
754 /*
755  * a list of sizes based on typical mtu's, used only if next hop size not
756  * returned.
757  */
/*
 * Must remain sorted ascending: sctp_get_prev_mtu() and
 * sctp_get_next_mtu() walk this table in order.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,			/* Ethernet */
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
778 
779 /*
780  * Return the largest MTU smaller than val. If there is no
781  * entry, just return val.
782  */
783 uint32_t
784 sctp_get_prev_mtu(uint32_t val)
785 {
786 	uint32_t i;
787 
788 	if (val <= sctp_mtu_sizes[0]) {
789 		return (val);
790 	}
791 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
792 		if (val <= sctp_mtu_sizes[i]) {
793 			break;
794 		}
795 	}
796 	return (sctp_mtu_sizes[i - 1]);
797 }
798 
799 /*
800  * Return the smallest MTU larger than val. If there is no
801  * entry, just return val.
802  */
803 uint32_t
804 sctp_get_next_mtu(struct sctp_inpcb *inp, uint32_t val)
805 {
806 	/* select another MTU that is just bigger than this one */
807 	uint32_t i;
808 
809 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
810 		if (val < sctp_mtu_sizes[i]) {
811 			return (sctp_mtu_sizes[i]);
812 		}
813 	}
814 	return (val);
815 }
816 
/*
 * Refill the endpoint's random_store by keyed-hashing (SCTP_HMAC) the
 * endpoint's random_numbers with an incrementing counter, then reset
 * store_at so consumers start from the beginning of the fresh block.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
835 
/*
 * Hand out the next 32-bit value from the endpoint's random store,
 * reserving a slot lock-free via compare-and-swap on store_at.  When
 * initial_sequence_debug is set, returns a simple incrementing counter
 * instead (deterministic debug mode).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/* Reserve [store_at, store_at + 4) of the random store via CAS. */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		/* Someone else advanced store_at first; try again. */
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		/*
		 * NOTE(review): the bytes at store_at are read after a
		 * concurrent caller may trigger a refill — presumably an
		 * accepted benign race (values stay random either way).
		 */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
873 
874 uint32_t
875 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
876 {
877 	uint32_t x, not_done;
878 	struct timeval now;
879 
880 	(void)SCTP_GETTIME_TIMEVAL(&now);
881 	not_done = 1;
882 	while (not_done) {
883 		x = sctp_select_initial_TSN(&inp->sctp_ep);
884 		if (x == 0) {
885 			/* we never use 0 */
886 			continue;
887 		}
888 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
889 			not_done = 0;
890 		}
891 	}
892 	return (x);
893 }
894 
895 int
896 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
897     uint32_t override_tag, uint32_t vrf_id)
898 {
899 	struct sctp_association *asoc;
900 
901 	/*
902 	 * Anything set to zero is taken care of by the allocation routine's
903 	 * bzero
904 	 */
905 
906 	/*
907 	 * Up front select what scoping to apply on addresses I tell my peer
908 	 * Not sure what to do with these right now, we will need to come up
909 	 * with a way to set them. We may need to pass them through from the
910 	 * caller in the sctp_aloc_assoc() function.
911 	 */
912 	int i;
913 
914 	asoc = &stcb->asoc;
915 	/* init all variables to a known value. */
916 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
917 	asoc->max_burst = m->sctp_ep.max_burst;
918 	asoc->fr_max_burst = m->sctp_ep.fr_max_burst;
919 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
920 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
921 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
922 	asoc->ecn_allowed = m->sctp_ecn_enable;
923 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
924 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
925 	asoc->sctp_frag_point = m->sctp_frag_point;
926 #ifdef INET
927 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
928 #else
929 	asoc->default_tos = 0;
930 #endif
931 
932 #ifdef INET6
933 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
934 #else
935 	asoc->default_flowlabel = 0;
936 #endif
937 	asoc->sb_send_resv = 0;
938 	if (override_tag) {
939 		asoc->my_vtag = override_tag;
940 	} else {
941 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
942 	}
943 	/* Get the nonce tags */
944 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
945 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
946 	asoc->vrf_id = vrf_id;
947 
948 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
949 		asoc->hb_is_disabled = 1;
950 	else
951 		asoc->hb_is_disabled = 0;
952 
953 #ifdef SCTP_ASOCLOG_OF_TSNS
954 	asoc->tsn_in_at = 0;
955 	asoc->tsn_out_at = 0;
956 	asoc->tsn_in_wrapped = 0;
957 	asoc->tsn_out_wrapped = 0;
958 	asoc->cumack_log_at = 0;
959 	asoc->cumack_log_atsnt = 0;
960 #endif
961 #ifdef SCTP_FS_SPEC_LOG
962 	asoc->fs_index = 0;
963 #endif
964 	asoc->refcnt = 0;
965 	asoc->assoc_up_sent = 0;
966 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
967 	    sctp_select_initial_TSN(&m->sctp_ep);
968 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimistic here */
970 	asoc->peer_supports_pktdrop = 1;
971 	asoc->peer_supports_nat = 0;
972 	asoc->sent_queue_retran_cnt = 0;
973 
974 	/* for CMT */
975 	asoc->last_net_cmt_send_started = NULL;
976 
977 	/* This will need to be adjusted */
978 	asoc->last_acked_seq = asoc->init_seq_number - 1;
979 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
980 	asoc->asconf_seq_in = asoc->last_acked_seq;
981 
982 	/* here we are different, we hold the next one we expect */
983 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
984 
985 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
986 	asoc->initial_rto = m->sctp_ep.initial_rto;
987 
988 	asoc->max_init_times = m->sctp_ep.max_init_times;
989 	asoc->max_send_times = m->sctp_ep.max_send_times;
990 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
991 	asoc->free_chunk_cnt = 0;
992 
993 	asoc->iam_blocking = 0;
994 
995 	asoc->context = m->sctp_context;
996 	asoc->def_send = m->def_send;
997 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
998 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
999 	asoc->pr_sctp_cnt = 0;
1000 	asoc->total_output_queue_size = 0;
1001 
1002 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1003 		struct in6pcb *inp6;
1004 
1005 		/* Its a V6 socket */
1006 		inp6 = (struct in6pcb *)m;
1007 		asoc->ipv6_addr_legal = 1;
1008 		/* Now look at the binding flag to see if V4 will be legal */
1009 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1010 			asoc->ipv4_addr_legal = 1;
1011 		} else {
1012 			/* V4 addresses are NOT legal on the association */
1013 			asoc->ipv4_addr_legal = 0;
1014 		}
1015 	} else {
1016 		/* Its a V4 socket, no - V6 */
1017 		asoc->ipv4_addr_legal = 1;
1018 		asoc->ipv6_addr_legal = 0;
1019 	}
1020 
1021 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1022 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1023 
1024 	asoc->smallest_mtu = m->sctp_frag_point;
1025 	asoc->minrto = m->sctp_ep.sctp_minrto;
1026 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1027 
1028 	asoc->locked_on_sending = NULL;
1029 	asoc->stream_locked_on = 0;
1030 	asoc->ecn_echo_cnt_onq = 0;
1031 	asoc->stream_locked = 0;
1032 
1033 	asoc->send_sack = 1;
1034 
1035 	LIST_INIT(&asoc->sctp_restricted_addrs);
1036 
1037 	TAILQ_INIT(&asoc->nets);
1038 	TAILQ_INIT(&asoc->pending_reply_queue);
1039 	TAILQ_INIT(&asoc->asconf_ack_sent);
1040 	/* Setup to fill the hb random cache at first HB */
1041 	asoc->hb_random_idx = 4;
1042 
1043 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1044 
1045 	stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
1046 	stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
1047 
1048 	stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
1049 	stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
1050 
1051 	/*
1052 	 * Now the stream parameters, here we allocate space for all streams
1053 	 * that we request by default.
1054 	 */
1055 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1056 	    m->sctp_ep.pre_open_stream_count;
1057 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1058 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1059 	    SCTP_M_STRMO);
1060 	if (asoc->strmout == NULL) {
1061 		/* big trouble no memory */
1062 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1063 		return (ENOMEM);
1064 	}
1065 	for (i = 0; i < asoc->streamoutcnt; i++) {
1066 		/*
1067 		 * inbound side must be set to 0xffff, also NOTE when we get
1068 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1069 		 * count (streamoutcnt) but first check if we sent to any of
1070 		 * the upper streams that were dropped (if some were). Those
1071 		 * that were dropped must be notified to the upper layer as
1072 		 * failed to send.
1073 		 */
1074 		asoc->strmout[i].next_sequence_sent = 0x0;
1075 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1076 		asoc->strmout[i].stream_no = i;
1077 		asoc->strmout[i].last_msg_incomplete = 0;
1078 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1079 	}
1080 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1081 
1082 	/* Now the mapping array */
1083 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1084 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1085 	    SCTP_M_MAP);
1086 	if (asoc->mapping_array == NULL) {
1087 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1088 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1089 		return (ENOMEM);
1090 	}
1091 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1092 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1093 	    SCTP_M_MAP);
1094 	if (asoc->nr_mapping_array == NULL) {
1095 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1096 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1097 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1098 		return (ENOMEM);
1099 	}
1100 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1101 
1102 	/* Now the init of the other outqueues */
1103 	TAILQ_INIT(&asoc->free_chunks);
1104 	TAILQ_INIT(&asoc->control_send_queue);
1105 	TAILQ_INIT(&asoc->asconf_send_queue);
1106 	TAILQ_INIT(&asoc->send_queue);
1107 	TAILQ_INIT(&asoc->sent_queue);
1108 	TAILQ_INIT(&asoc->reasmqueue);
1109 	TAILQ_INIT(&asoc->resetHead);
1110 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1111 	TAILQ_INIT(&asoc->asconf_queue);
1112 	/* authentication fields */
1113 	asoc->authinfo.random = NULL;
1114 	asoc->authinfo.active_keyid = 0;
1115 	asoc->authinfo.assoc_key = NULL;
1116 	asoc->authinfo.assoc_keyid = 0;
1117 	asoc->authinfo.recv_key = NULL;
1118 	asoc->authinfo.recv_keyid = 0;
1119 	LIST_INIT(&asoc->shared_keys);
1120 	asoc->marked_retrans = 0;
1121 	asoc->timoinit = 0;
1122 	asoc->timodata = 0;
1123 	asoc->timosack = 0;
1124 	asoc->timoshutdown = 0;
1125 	asoc->timoheartbeat = 0;
1126 	asoc->timocookie = 0;
1127 	asoc->timoshutdownack = 0;
1128 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1129 	asoc->discontinuity_time = asoc->start_time;
1130 	/*
1131 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1132 	 * freed later when the association is freed.
1133 	 */
1134 	return (0);
1135 }
1136 
1137 void
1138 sctp_print_mapping_array(struct sctp_association *asoc)
1139 {
1140 	unsigned int i, limit;
1141 
1142 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1143 	    asoc->mapping_array_size,
1144 	    asoc->mapping_array_base_tsn,
1145 	    asoc->cumulative_tsn,
1146 	    asoc->highest_tsn_inside_map,
1147 	    asoc->highest_tsn_inside_nr_map);
1148 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1149 		if (asoc->mapping_array[limit - 1]) {
1150 			break;
1151 		}
1152 	}
1153 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1154 	for (i = 0; i < limit; i++) {
1155 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1156 	}
1157 	if (limit % 16)
1158 		printf("\n");
1159 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1160 		if (asoc->nr_mapping_array[limit - 1]) {
1161 			break;
1162 		}
1163 	}
1164 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1165 	for (i = 0; i < limit; i++) {
1166 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1167 	}
1168 	if (limit % 16)
1169 		printf("\n");
1170 }
1171 
1172 int
1173 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1174 {
1175 	/* mapping array needs to grow */
1176 	uint8_t *new_array1, *new_array2;
1177 	uint32_t new_size;
1178 
1179 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1180 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1181 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1182 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1183 		/* can't get more, forget it */
1184 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1185 		if (new_array1) {
1186 			SCTP_FREE(new_array1, SCTP_M_MAP);
1187 		}
1188 		if (new_array2) {
1189 			SCTP_FREE(new_array2, SCTP_M_MAP);
1190 		}
1191 		return (-1);
1192 	}
1193 	memset(new_array1, 0, new_size);
1194 	memset(new_array2, 0, new_size);
1195 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1196 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1197 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1198 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1199 	asoc->mapping_array = new_array1;
1200 	asoc->nr_mapping_array = new_array2;
1201 	asoc->mapping_array_size = new_size;
1202 	return (0);
1203 }
1204 
1205 
/*
 * Run one iterator over the endpoint list: for every inpcb matching the
 * iterator's pcb_flags/pcb_features, call function_inp once, then
 * function_assoc for each association in the requested state, and
 * function_inp_end when the endpoint's association list is exhausted.
 * When the whole walk completes, function_atend is invoked and the
 * iterator structure itself is freed here.
 *
 * Holds the INP-INFO read lock and the global ITERATOR lock for the
 * duration, except for the periodic "pause" below that lets other
 * threads in.  Lock order: INP-INFO -> ITERATOR -> INP -> TCB.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/*
		 * The creator took a reference on the starting inp so it
		 * could not go away before we started; drop it now that
		 * we hold its lock.
		 */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* First pass: the RLOCK was already taken above. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint-level callback asked us to skip, or no assocs. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/*
			 * Pause to let others grab the lock: keep the stcb
			 * and inp pinned via refcounts, drop all locks,
			 * then re-take them.  While the locks were dropped
			 * someone may have flagged us in sctp_it_ctl.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				printf("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1357 
/*
 * Service routine for the iterator work queue: pop each queued iterator
 * off sctp_it_ctl.iteratorhead and run it to completion through
 * sctp_iterator_work().  Entered and exited with the iterator WQ lock
 * held; the lock is dropped around each individual iterator run, so new
 * iterators may be queued while one is executing.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		/* Run in the iterator's vnet; sctp_iterator_work frees "it". */
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
			/* Shutdown requested; leave remaining iterators queued. */
			break;
		}
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1384 
1385 
1386 static void
1387 sctp_handle_addr_wq(void)
1388 {
1389 	/* deal with the ADDR wq from the rtsock calls */
1390 	struct sctp_laddr *wi, *nwi;
1391 	struct sctp_asconf_iterator *asc;
1392 
1393 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1394 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1395 	if (asc == NULL) {
1396 		/* Try later, no memory */
1397 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1398 		    (struct sctp_inpcb *)NULL,
1399 		    (struct sctp_tcb *)NULL,
1400 		    (struct sctp_nets *)NULL);
1401 		return;
1402 	}
1403 	LIST_INIT(&asc->list_of_work);
1404 	asc->cnt = 0;
1405 
1406 	SCTP_WQ_ADDR_LOCK();
1407 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1408 		LIST_REMOVE(wi, sctp_nxt_addr);
1409 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1410 		asc->cnt++;
1411 	}
1412 	SCTP_WQ_ADDR_UNLOCK();
1413 
1414 	if (asc->cnt == 0) {
1415 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1416 	} else {
1417 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1418 		    sctp_asconf_iterator_stcb,
1419 		    NULL,	/* No ep end for boundall */
1420 		    SCTP_PCB_FLAGS_BOUNDALL,
1421 		    SCTP_PCB_ANY_FEATURES,
1422 		    SCTP_ASOC_ANY_STATE,
1423 		    (void *)asc, 0,
1424 		    sctp_asconf_iterator_end, NULL, 0);
1425 	}
1426 }
1427 
/*
 * NOTE(review): non-static file-scope globals with very generic names.
 * They are plain scratch values, assigned without any synchronization,
 * so concurrent timers can clobber each other's values — presumably
 * debugging leftovers; candidates for being made function-local or
 * removed entirely.  Confirm no external code references them.
 */
int retcode = 0;
int cur_oerr = 0;
1430 
1431 void
1432 sctp_timeout_handler(void *t)
1433 {
1434 	struct sctp_inpcb *inp;
1435 	struct sctp_tcb *stcb;
1436 	struct sctp_nets *net;
1437 	struct sctp_timer *tmr;
1438 
1439 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1440 	struct socket *so;
1441 
1442 #endif
1443 	int did_output, type;
1444 
1445 	tmr = (struct sctp_timer *)t;
1446 	inp = (struct sctp_inpcb *)tmr->ep;
1447 	stcb = (struct sctp_tcb *)tmr->tcb;
1448 	net = (struct sctp_nets *)tmr->net;
1449 	CURVNET_SET((struct vnet *)tmr->vnet);
1450 	did_output = 1;
1451 
1452 #ifdef SCTP_AUDITING_ENABLED
1453 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1454 	sctp_auditing(3, inp, stcb, net);
1455 #endif
1456 
1457 	/* sanity checks... */
1458 	if (tmr->self != (void *)tmr) {
1459 		/*
1460 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1461 		 * tmr);
1462 		 */
1463 		CURVNET_RESTORE();
1464 		return;
1465 	}
1466 	tmr->stopped_from = 0xa001;
1467 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1468 		/*
1469 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1470 		 * tmr->type);
1471 		 */
1472 		CURVNET_RESTORE();
1473 		return;
1474 	}
1475 	tmr->stopped_from = 0xa002;
1476 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1477 		CURVNET_RESTORE();
1478 		return;
1479 	}
1480 	/* if this is an iterator timeout, get the struct and clear inp */
1481 	tmr->stopped_from = 0xa003;
1482 	type = tmr->type;
1483 	if (inp) {
1484 		SCTP_INP_INCR_REF(inp);
1485 		if ((inp->sctp_socket == 0) &&
1486 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1487 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1488 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1489 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1490 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1491 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1492 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1493 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1494 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1495 		    ) {
1496 			SCTP_INP_DECR_REF(inp);
1497 			CURVNET_RESTORE();
1498 			return;
1499 		}
1500 	}
1501 	tmr->stopped_from = 0xa004;
1502 	if (stcb) {
1503 		atomic_add_int(&stcb->asoc.refcnt, 1);
1504 		if (stcb->asoc.state == 0) {
1505 			atomic_add_int(&stcb->asoc.refcnt, -1);
1506 			if (inp) {
1507 				SCTP_INP_DECR_REF(inp);
1508 			}
1509 			CURVNET_RESTORE();
1510 			return;
1511 		}
1512 	}
1513 	tmr->stopped_from = 0xa005;
1514 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
1515 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1516 		if (inp) {
1517 			SCTP_INP_DECR_REF(inp);
1518 		}
1519 		if (stcb) {
1520 			atomic_add_int(&stcb->asoc.refcnt, -1);
1521 		}
1522 		CURVNET_RESTORE();
1523 		return;
1524 	}
1525 	tmr->stopped_from = 0xa006;
1526 
1527 	if (stcb) {
1528 		SCTP_TCB_LOCK(stcb);
1529 		atomic_add_int(&stcb->asoc.refcnt, -1);
1530 		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
1531 		    ((stcb->asoc.state == 0) ||
1532 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1533 			SCTP_TCB_UNLOCK(stcb);
1534 			if (inp) {
1535 				SCTP_INP_DECR_REF(inp);
1536 			}
1537 			CURVNET_RESTORE();
1538 			return;
1539 		}
1540 	}
1541 	/* record in stopped what t-o occured */
1542 	tmr->stopped_from = tmr->type;
1543 
1544 	/* mark as being serviced now */
1545 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1546 		/*
1547 		 * Callout has been rescheduled.
1548 		 */
1549 		goto get_out;
1550 	}
1551 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1552 		/*
1553 		 * Not active, so no action.
1554 		 */
1555 		goto get_out;
1556 	}
1557 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1558 
1559 	/* call the handler for the appropriate timer type */
1560 	switch (tmr->type) {
1561 	case SCTP_TIMER_TYPE_ZERO_COPY:
1562 		if (inp == NULL) {
1563 			break;
1564 		}
1565 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1566 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1567 		}
1568 		break;
1569 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1570 		if (inp == NULL) {
1571 			break;
1572 		}
1573 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1574 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1575 		}
1576 		break;
1577 	case SCTP_TIMER_TYPE_ADDR_WQ:
1578 		sctp_handle_addr_wq();
1579 		break;
1580 	case SCTP_TIMER_TYPE_SEND:
1581 		if ((stcb == NULL) || (inp == NULL)) {
1582 			break;
1583 		}
1584 		SCTP_STAT_INCR(sctps_timodata);
1585 		stcb->asoc.timodata++;
1586 		stcb->asoc.num_send_timers_up--;
1587 		if (stcb->asoc.num_send_timers_up < 0) {
1588 			stcb->asoc.num_send_timers_up = 0;
1589 		}
1590 		SCTP_TCB_LOCK_ASSERT(stcb);
1591 		cur_oerr = stcb->asoc.overall_error_count;
1592 		retcode = sctp_t3rxt_timer(inp, stcb, net);
1593 		if (retcode) {
1594 			/* no need to unlock on tcb its gone */
1595 
1596 			goto out_decr;
1597 		}
1598 		SCTP_TCB_LOCK_ASSERT(stcb);
1599 #ifdef SCTP_AUDITING_ENABLED
1600 		sctp_auditing(4, inp, stcb, net);
1601 #endif
1602 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1603 		if ((stcb->asoc.num_send_timers_up == 0) &&
1604 		    (stcb->asoc.sent_queue_cnt > 0)) {
1605 			struct sctp_tmit_chunk *chk;
1606 
1607 			/*
1608 			 * safeguard. If there on some on the sent queue
1609 			 * somewhere but no timers running something is
1610 			 * wrong... so we start a timer on the first chunk
1611 			 * on the send queue on whatever net it is sent to.
1612 			 */
1613 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1614 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1615 			    chk->whoTo);
1616 		}
1617 		break;
1618 	case SCTP_TIMER_TYPE_INIT:
1619 		if ((stcb == NULL) || (inp == NULL)) {
1620 			break;
1621 		}
1622 		SCTP_STAT_INCR(sctps_timoinit);
1623 		stcb->asoc.timoinit++;
1624 		if (sctp_t1init_timer(inp, stcb, net)) {
1625 			/* no need to unlock on tcb its gone */
1626 			goto out_decr;
1627 		}
1628 		/* We do output but not here */
1629 		did_output = 0;
1630 		break;
1631 	case SCTP_TIMER_TYPE_RECV:
1632 		if ((stcb == NULL) || (inp == NULL)) {
1633 			break;
1634 		} {
1635 			SCTP_STAT_INCR(sctps_timosack);
1636 			stcb->asoc.timosack++;
1637 			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
1638 		}
1639 #ifdef SCTP_AUDITING_ENABLED
1640 		sctp_auditing(4, inp, stcb, net);
1641 #endif
1642 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1643 		break;
1644 	case SCTP_TIMER_TYPE_SHUTDOWN:
1645 		if ((stcb == NULL) || (inp == NULL)) {
1646 			break;
1647 		}
1648 		if (sctp_shutdown_timer(inp, stcb, net)) {
1649 			/* no need to unlock on tcb its gone */
1650 			goto out_decr;
1651 		}
1652 		SCTP_STAT_INCR(sctps_timoshutdown);
1653 		stcb->asoc.timoshutdown++;
1654 #ifdef SCTP_AUDITING_ENABLED
1655 		sctp_auditing(4, inp, stcb, net);
1656 #endif
1657 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1658 		break;
1659 	case SCTP_TIMER_TYPE_HEARTBEAT:
1660 		{
1661 			struct sctp_nets *lnet;
1662 			int cnt_of_unconf = 0;
1663 
1664 			if ((stcb == NULL) || (inp == NULL)) {
1665 				break;
1666 			}
1667 			SCTP_STAT_INCR(sctps_timoheartbeat);
1668 			stcb->asoc.timoheartbeat++;
1669 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1670 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1671 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1672 					cnt_of_unconf++;
1673 				}
1674 			}
1675 			if (cnt_of_unconf == 0) {
1676 				if (sctp_heartbeat_timer(inp, stcb, lnet,
1677 				    cnt_of_unconf)) {
1678 					/* no need to unlock on tcb its gone */
1679 					goto out_decr;
1680 				}
1681 			}
1682 #ifdef SCTP_AUDITING_ENABLED
1683 			sctp_auditing(4, inp, stcb, lnet);
1684 #endif
1685 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
1686 			    stcb->sctp_ep, stcb, lnet);
1687 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1688 		}
1689 		break;
1690 	case SCTP_TIMER_TYPE_COOKIE:
1691 		if ((stcb == NULL) || (inp == NULL)) {
1692 			break;
1693 		}
1694 		if (sctp_cookie_timer(inp, stcb, net)) {
1695 			/* no need to unlock on tcb its gone */
1696 			goto out_decr;
1697 		}
1698 		SCTP_STAT_INCR(sctps_timocookie);
1699 		stcb->asoc.timocookie++;
1700 #ifdef SCTP_AUDITING_ENABLED
1701 		sctp_auditing(4, inp, stcb, net);
1702 #endif
1703 		/*
1704 		 * We consider T3 and Cookie timer pretty much the same with
1705 		 * respect to where from in chunk_output.
1706 		 */
1707 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1708 		break;
1709 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1710 		{
1711 			struct timeval tv;
1712 			int i, secret;
1713 
1714 			if (inp == NULL) {
1715 				break;
1716 			}
1717 			SCTP_STAT_INCR(sctps_timosecret);
1718 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1719 			SCTP_INP_WLOCK(inp);
1720 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1721 			inp->sctp_ep.last_secret_number =
1722 			    inp->sctp_ep.current_secret_number;
1723 			inp->sctp_ep.current_secret_number++;
1724 			if (inp->sctp_ep.current_secret_number >=
1725 			    SCTP_HOW_MANY_SECRETS) {
1726 				inp->sctp_ep.current_secret_number = 0;
1727 			}
1728 			secret = (int)inp->sctp_ep.current_secret_number;
1729 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1730 				inp->sctp_ep.secret_key[secret][i] =
1731 				    sctp_select_initial_TSN(&inp->sctp_ep);
1732 			}
1733 			SCTP_INP_WUNLOCK(inp);
1734 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1735 		}
1736 		did_output = 0;
1737 		break;
1738 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1739 		if ((stcb == NULL) || (inp == NULL)) {
1740 			break;
1741 		}
1742 		SCTP_STAT_INCR(sctps_timopathmtu);
1743 		sctp_pathmtu_timer(inp, stcb, net);
1744 		did_output = 0;
1745 		break;
1746 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1747 		if ((stcb == NULL) || (inp == NULL)) {
1748 			break;
1749 		}
1750 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1751 			/* no need to unlock on tcb its gone */
1752 			goto out_decr;
1753 		}
1754 		SCTP_STAT_INCR(sctps_timoshutdownack);
1755 		stcb->asoc.timoshutdownack++;
1756 #ifdef SCTP_AUDITING_ENABLED
1757 		sctp_auditing(4, inp, stcb, net);
1758 #endif
1759 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1760 		break;
1761 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1762 		if ((stcb == NULL) || (inp == NULL)) {
1763 			break;
1764 		}
1765 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1766 		sctp_abort_an_association(inp, stcb,
1767 		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
1768 		/* no need to unlock on tcb its gone */
1769 		goto out_decr;
1770 
1771 	case SCTP_TIMER_TYPE_STRRESET:
1772 		if ((stcb == NULL) || (inp == NULL)) {
1773 			break;
1774 		}
1775 		if (sctp_strreset_timer(inp, stcb, net)) {
1776 			/* no need to unlock on tcb its gone */
1777 			goto out_decr;
1778 		}
1779 		SCTP_STAT_INCR(sctps_timostrmrst);
1780 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1781 		break;
1782 	case SCTP_TIMER_TYPE_EARLYFR:
1783 		/* Need to do FR of things for net */
1784 		if ((stcb == NULL) || (inp == NULL)) {
1785 			break;
1786 		}
1787 		SCTP_STAT_INCR(sctps_timoearlyfr);
1788 		sctp_early_fr_timer(inp, stcb, net);
1789 		break;
1790 	case SCTP_TIMER_TYPE_ASCONF:
1791 		if ((stcb == NULL) || (inp == NULL)) {
1792 			break;
1793 		}
1794 		if (sctp_asconf_timer(inp, stcb, net)) {
1795 			/* no need to unlock on tcb its gone */
1796 			goto out_decr;
1797 		}
1798 		SCTP_STAT_INCR(sctps_timoasconf);
1799 #ifdef SCTP_AUDITING_ENABLED
1800 		sctp_auditing(4, inp, stcb, net);
1801 #endif
1802 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1803 		break;
1804 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1805 		if ((stcb == NULL) || (inp == NULL)) {
1806 			break;
1807 		}
1808 		sctp_delete_prim_timer(inp, stcb, net);
1809 		SCTP_STAT_INCR(sctps_timodelprim);
1810 		break;
1811 
1812 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1813 		if ((stcb == NULL) || (inp == NULL)) {
1814 			break;
1815 		}
1816 		SCTP_STAT_INCR(sctps_timoautoclose);
1817 		sctp_autoclose_timer(inp, stcb, net);
1818 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1819 		did_output = 0;
1820 		break;
1821 	case SCTP_TIMER_TYPE_ASOCKILL:
1822 		if ((stcb == NULL) || (inp == NULL)) {
1823 			break;
1824 		}
1825 		SCTP_STAT_INCR(sctps_timoassockill);
1826 		/* Can we free it yet? */
1827 		SCTP_INP_DECR_REF(inp);
1828 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1829 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1830 		so = SCTP_INP_SO(inp);
1831 		atomic_add_int(&stcb->asoc.refcnt, 1);
1832 		SCTP_TCB_UNLOCK(stcb);
1833 		SCTP_SOCKET_LOCK(so, 1);
1834 		SCTP_TCB_LOCK(stcb);
1835 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1836 #endif
1837 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1838 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1839 		SCTP_SOCKET_UNLOCK(so, 1);
1840 #endif
1841 		/*
1842 		 * free asoc, always unlocks (or destroy's) so prevent
1843 		 * duplicate unlock or unlock of a free mtx :-0
1844 		 */
1845 		stcb = NULL;
1846 		goto out_no_decr;
1847 	case SCTP_TIMER_TYPE_INPKILL:
1848 		SCTP_STAT_INCR(sctps_timoinpkill);
1849 		if (inp == NULL) {
1850 			break;
1851 		}
1852 		/*
1853 		 * special case, take away our increment since WE are the
1854 		 * killer
1855 		 */
1856 		SCTP_INP_DECR_REF(inp);
1857 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1858 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1859 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1860 		inp = NULL;
1861 		goto out_no_decr;
1862 	default:
1863 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1864 		    tmr->type);
1865 		break;
1866 	};
1867 #ifdef SCTP_AUDITING_ENABLED
1868 	sctp_audit_log(0xF1, (uint8_t) tmr->type);
1869 	if (inp)
1870 		sctp_auditing(5, inp, stcb, net);
1871 #endif
1872 	if ((did_output) && stcb) {
1873 		/*
1874 		 * Now we need to clean up the control chunk chain if an
1875 		 * ECNE is on it. It must be marked as UNSENT again so next
1876 		 * call will continue to send it until such time that we get
1877 		 * a CWR, to remove it. It is, however, less likely that we
1878 		 * will find a ecn echo on the chain though.
1879 		 */
1880 		sctp_fix_ecn_echo(&stcb->asoc);
1881 	}
1882 get_out:
1883 	if (stcb) {
1884 		SCTP_TCB_UNLOCK(stcb);
1885 	}
1886 out_decr:
1887 	if (inp) {
1888 		SCTP_INP_DECR_REF(inp);
1889 	}
1890 out_no_decr:
1891 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
1892 	    type);
1893 	CURVNET_RESTORE();
1894 }
1895 
1896 void
1897 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1898     struct sctp_nets *net)
1899 {
1900 	int to_ticks;
1901 	struct sctp_timer *tmr;
1902 
1903 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1904 		return;
1905 
1906 	to_ticks = 0;
1907 
1908 	tmr = NULL;
1909 	if (stcb) {
1910 		SCTP_TCB_LOCK_ASSERT(stcb);
1911 	}
1912 	switch (t_type) {
1913 	case SCTP_TIMER_TYPE_ZERO_COPY:
1914 		tmr = &inp->sctp_ep.zero_copy_timer;
1915 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1916 		break;
1917 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1918 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1919 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1920 		break;
1921 	case SCTP_TIMER_TYPE_ADDR_WQ:
1922 		/* Only 1 tick away :-) */
1923 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1924 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1925 		break;
1926 	case SCTP_TIMER_TYPE_SEND:
1927 		/* Here we use the RTO timer */
1928 		{
1929 			int rto_val;
1930 
1931 			if ((stcb == NULL) || (net == NULL)) {
1932 				return;
1933 			}
1934 			tmr = &net->rxt_timer;
1935 			if (net->RTO == 0) {
1936 				rto_val = stcb->asoc.initial_rto;
1937 			} else {
1938 				rto_val = net->RTO;
1939 			}
1940 			to_ticks = MSEC_TO_TICKS(rto_val);
1941 		}
1942 		break;
1943 	case SCTP_TIMER_TYPE_INIT:
1944 		/*
1945 		 * Here we use the INIT timer default usually about 1
1946 		 * minute.
1947 		 */
1948 		if ((stcb == NULL) || (net == NULL)) {
1949 			return;
1950 		}
1951 		tmr = &net->rxt_timer;
1952 		if (net->RTO == 0) {
1953 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1954 		} else {
1955 			to_ticks = MSEC_TO_TICKS(net->RTO);
1956 		}
1957 		break;
1958 	case SCTP_TIMER_TYPE_RECV:
1959 		/*
1960 		 * Here we use the Delayed-Ack timer value from the inp
1961 		 * ususually about 200ms.
1962 		 */
1963 		if (stcb == NULL) {
1964 			return;
1965 		}
1966 		tmr = &stcb->asoc.dack_timer;
1967 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1968 		break;
1969 	case SCTP_TIMER_TYPE_SHUTDOWN:
1970 		/* Here we use the RTO of the destination. */
1971 		if ((stcb == NULL) || (net == NULL)) {
1972 			return;
1973 		}
1974 		if (net->RTO == 0) {
1975 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1976 		} else {
1977 			to_ticks = MSEC_TO_TICKS(net->RTO);
1978 		}
1979 		tmr = &net->rxt_timer;
1980 		break;
1981 	case SCTP_TIMER_TYPE_HEARTBEAT:
1982 		/*
1983 		 * the net is used here so that we can add in the RTO. Even
1984 		 * though we use a different timer. We also add the HB timer
1985 		 * PLUS a random jitter.
1986 		 */
1987 		if ((inp == NULL) || (stcb == NULL)) {
1988 			return;
1989 		} else {
1990 			uint32_t rndval;
1991 			uint8_t this_random;
1992 			int cnt_of_unconf = 0;
1993 			struct sctp_nets *lnet;
1994 
1995 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1996 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1997 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1998 					cnt_of_unconf++;
1999 				}
2000 			}
2001 			if (cnt_of_unconf) {
2002 				net = lnet = NULL;
2003 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
2004 			}
2005 			if (stcb->asoc.hb_random_idx > 3) {
2006 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2007 				memcpy(stcb->asoc.hb_random_values, &rndval,
2008 				    sizeof(stcb->asoc.hb_random_values));
2009 				stcb->asoc.hb_random_idx = 0;
2010 			}
2011 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
2012 			stcb->asoc.hb_random_idx++;
2013 			stcb->asoc.hb_ect_randombit = 0;
2014 			/*
2015 			 * this_random will be 0 - 256 ms RTO is in ms.
2016 			 */
2017 			if ((stcb->asoc.hb_is_disabled) &&
2018 			    (cnt_of_unconf == 0)) {
2019 				return;
2020 			}
2021 			if (net) {
2022 				int delay;
2023 
2024 				delay = stcb->asoc.heart_beat_delay;
2025 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2026 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2027 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2028 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2029 						delay = 0;
2030 					}
2031 				}
2032 				if (net->RTO == 0) {
2033 					/* Never been checked */
2034 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2035 				} else {
2036 					/* set rto_val to the ms */
2037 					to_ticks = delay + net->RTO + this_random;
2038 				}
2039 			} else {
2040 				if (cnt_of_unconf) {
2041 					to_ticks = this_random + stcb->asoc.initial_rto;
2042 				} else {
2043 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2044 				}
2045 			}
2046 			/*
2047 			 * Now we must convert the to_ticks that are now in
2048 			 * ms to ticks.
2049 			 */
2050 			to_ticks = MSEC_TO_TICKS(to_ticks);
2051 			tmr = &stcb->asoc.hb_timer;
2052 		}
2053 		break;
2054 	case SCTP_TIMER_TYPE_COOKIE:
2055 		/*
2056 		 * Here we can use the RTO timer from the network since one
2057 		 * RTT was compelete. If a retran happened then we will be
2058 		 * using the RTO initial value.
2059 		 */
2060 		if ((stcb == NULL) || (net == NULL)) {
2061 			return;
2062 		}
2063 		if (net->RTO == 0) {
2064 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2065 		} else {
2066 			to_ticks = MSEC_TO_TICKS(net->RTO);
2067 		}
2068 		tmr = &net->rxt_timer;
2069 		break;
2070 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2071 		/*
2072 		 * nothing needed but the endpoint here ususually about 60
2073 		 * minutes.
2074 		 */
2075 		if (inp == NULL) {
2076 			return;
2077 		}
2078 		tmr = &inp->sctp_ep.signature_change;
2079 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2080 		break;
2081 	case SCTP_TIMER_TYPE_ASOCKILL:
2082 		if (stcb == NULL) {
2083 			return;
2084 		}
2085 		tmr = &stcb->asoc.strreset_timer;
2086 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2087 		break;
2088 	case SCTP_TIMER_TYPE_INPKILL:
2089 		/*
2090 		 * The inp is setup to die. We re-use the signature_chage
2091 		 * timer since that has stopped and we are in the GONE
2092 		 * state.
2093 		 */
2094 		if (inp == NULL) {
2095 			return;
2096 		}
2097 		tmr = &inp->sctp_ep.signature_change;
2098 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2099 		break;
2100 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2101 		/*
2102 		 * Here we use the value found in the EP for PMTU ususually
2103 		 * about 10 minutes.
2104 		 */
2105 		if ((stcb == NULL) || (inp == NULL)) {
2106 			return;
2107 		}
2108 		if (net == NULL) {
2109 			return;
2110 		}
2111 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2112 		tmr = &net->pmtu_timer;
2113 		break;
2114 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2115 		/* Here we use the RTO of the destination */
2116 		if ((stcb == NULL) || (net == NULL)) {
2117 			return;
2118 		}
2119 		if (net->RTO == 0) {
2120 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2121 		} else {
2122 			to_ticks = MSEC_TO_TICKS(net->RTO);
2123 		}
2124 		tmr = &net->rxt_timer;
2125 		break;
2126 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2127 		/*
2128 		 * Here we use the endpoints shutdown guard timer usually
2129 		 * about 3 minutes.
2130 		 */
2131 		if ((inp == NULL) || (stcb == NULL)) {
2132 			return;
2133 		}
2134 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2135 		tmr = &stcb->asoc.shut_guard_timer;
2136 		break;
2137 	case SCTP_TIMER_TYPE_STRRESET:
2138 		/*
2139 		 * Here the timer comes from the stcb but its value is from
2140 		 * the net's RTO.
2141 		 */
2142 		if ((stcb == NULL) || (net == NULL)) {
2143 			return;
2144 		}
2145 		if (net->RTO == 0) {
2146 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2147 		} else {
2148 			to_ticks = MSEC_TO_TICKS(net->RTO);
2149 		}
2150 		tmr = &stcb->asoc.strreset_timer;
2151 		break;
2152 
2153 	case SCTP_TIMER_TYPE_EARLYFR:
2154 		{
2155 			unsigned int msec;
2156 
2157 			if ((stcb == NULL) || (net == NULL)) {
2158 				return;
2159 			}
2160 			if (net->flight_size > net->cwnd) {
2161 				/* no need to start */
2162 				return;
2163 			}
2164 			SCTP_STAT_INCR(sctps_earlyfrstart);
2165 			if (net->lastsa == 0) {
2166 				/* Hmm no rtt estimate yet? */
2167 				msec = stcb->asoc.initial_rto >> 2;
2168 			} else {
2169 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2170 			}
2171 			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
2172 				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
2173 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2174 					msec = SCTP_MINFR_MSEC_FLOOR;
2175 				}
2176 			}
2177 			to_ticks = MSEC_TO_TICKS(msec);
2178 			tmr = &net->fr_timer;
2179 		}
2180 		break;
2181 	case SCTP_TIMER_TYPE_ASCONF:
2182 		/*
2183 		 * Here the timer comes from the stcb but its value is from
2184 		 * the net's RTO.
2185 		 */
2186 		if ((stcb == NULL) || (net == NULL)) {
2187 			return;
2188 		}
2189 		if (net->RTO == 0) {
2190 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2191 		} else {
2192 			to_ticks = MSEC_TO_TICKS(net->RTO);
2193 		}
2194 		tmr = &stcb->asoc.asconf_timer;
2195 		break;
2196 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2197 		if ((stcb == NULL) || (net != NULL)) {
2198 			return;
2199 		}
2200 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2201 		tmr = &stcb->asoc.delete_prim_timer;
2202 		break;
2203 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2204 		if (stcb == NULL) {
2205 			return;
2206 		}
2207 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2208 			/*
2209 			 * Really an error since stcb is NOT set to
2210 			 * autoclose
2211 			 */
2212 			return;
2213 		}
2214 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2215 		tmr = &stcb->asoc.autoclose_timer;
2216 		break;
2217 	default:
2218 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2219 		    __FUNCTION__, t_type);
2220 		return;
2221 		break;
2222 	};
2223 	if ((to_ticks <= 0) || (tmr == NULL)) {
2224 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2225 		    __FUNCTION__, t_type, to_ticks, tmr);
2226 		return;
2227 	}
2228 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2229 		/*
2230 		 * we do NOT allow you to have it already running. if it is
2231 		 * we leave the current one up unchanged
2232 		 */
2233 		return;
2234 	}
2235 	/* At this point we can proceed */
2236 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2237 		stcb->asoc.num_send_timers_up++;
2238 	}
2239 	tmr->stopped_from = 0;
2240 	tmr->type = t_type;
2241 	tmr->ep = (void *)inp;
2242 	tmr->tcb = (void *)stcb;
2243 	tmr->net = (void *)net;
2244 	tmr->self = (void *)tmr;
2245 	tmr->vnet = (void *)curvnet;
2246 	tmr->ticks = sctp_get_tick_count();
2247 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2248 	return;
2249 }
2250 
/*
 * Stop the timer of the given type for the endpoint (inp), association
 * (stcb) and/or destination (net).  "from" is recorded in
 * tmr->stopped_from for post-mortem debugging of who stopped the timer.
 * Several timer types share storage (e.g. strreset/asockill, and
 * signature_change is reused for NEWCOOKIE and INPKILL); if the armed
 * timer is of a different type than requested, the call is a no-op so we
 * never kill another user's timer by accident.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/*
	 * Every timer type except ADDR_WQ (which lives in global
	 * SCTP_BASE_INFO state) requires a valid endpoint.
	 */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the sctp_timer instance that backs it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer (shares storage with the
		 * stream-reset timer).
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the pending-SEND-timer bookkeeping consistent. */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/* Clearing self marks the timer as no longer armed by us. */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2414 
2415 uint32_t
2416 sctp_calculate_len(struct mbuf *m)
2417 {
2418 	uint32_t tlen = 0;
2419 	struct mbuf *at;
2420 
2421 	at = m;
2422 	while (at) {
2423 		tlen += SCTP_BUF_LEN(at);
2424 		at = SCTP_BUF_NEXT(at);
2425 	}
2426 	return (tlen);
2427 }
2428 
2429 void
2430 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2431     struct sctp_association *asoc, uint32_t mtu)
2432 {
2433 	/*
2434 	 * Reset the P-MTU size on this association, this involves changing
2435 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2436 	 * allow the DF flag to be cleared.
2437 	 */
2438 	struct sctp_tmit_chunk *chk;
2439 	unsigned int eff_mtu, ovh;
2440 
2441 	asoc->smallest_mtu = mtu;
2442 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2443 		ovh = SCTP_MIN_OVERHEAD;
2444 	} else {
2445 		ovh = SCTP_MIN_V4_OVERHEAD;
2446 	}
2447 	eff_mtu = mtu - ovh;
2448 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2449 		if (chk->send_size > eff_mtu) {
2450 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2451 		}
2452 	}
2453 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2454 		if (chk->send_size > eff_mtu) {
2455 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2456 		}
2457 	}
2458 }
2459 
2460 
2461 /*
2462  * given an association and starting time of the current RTT period return
2463  * RTO in number of msecs net should point to the current network
2464  */
2465 
2466 uint32_t
2467 sctp_calculate_rto(struct sctp_tcb *stcb,
2468     struct sctp_association *asoc,
2469     struct sctp_nets *net,
2470     struct timeval *told,
2471     int safe, int rtt_from_sack)
2472 {
2473 	/*-
2474 	 * given an association and the starting time of the current RTT
2475 	 * period (in value1/value2) return RTO in number of msecs.
2476 	 */
2477 	int32_t rtt;		/* RTT in ms */
2478 	uint32_t new_rto;
2479 	int first_measure = 0;
2480 	struct timeval now, then, *old;
2481 
2482 	/* Copy it out for sparc64 */
2483 	if (safe == sctp_align_unsafe_makecopy) {
2484 		old = &then;
2485 		memcpy(&then, told, sizeof(struct timeval));
2486 	} else if (safe == sctp_align_safe_nocopy) {
2487 		old = told;
2488 	} else {
2489 		/* error */
2490 		SCTP_PRINTF("Huh, bad rto calc call\n");
2491 		return (0);
2492 	}
2493 	/************************/
2494 	/* 1. calculate new RTT */
2495 	/************************/
2496 	/* get the current time */
2497 	if (stcb->asoc.use_precise_time) {
2498 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2499 	} else {
2500 		(void)SCTP_GETTIME_TIMEVAL(&now);
2501 	}
2502 	timevalsub(&now, old);
2503 	/* store the current RTT in us */
2504 	net->rtt = (uint64_t) 10000000 *(uint64_t) now.tv_sec +
2505 	         (uint64_t) now.tv_usec;
2506 
2507 	/* computer rtt in ms */
2508 	rtt = net->rtt / 1000;
2509 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2510 		/*
2511 		 * Tell the CC module that a new update has just occurred
2512 		 * from a sack
2513 		 */
2514 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2515 	}
2516 	/*
2517 	 * Do we need to determine the lan? We do this only on sacks i.e.
2518 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2519 	 */
2520 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2521 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2522 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2523 			net->lan_type = SCTP_LAN_INTERNET;
2524 		} else {
2525 			net->lan_type = SCTP_LAN_LOCAL;
2526 		}
2527 	}
2528 	/***************************/
2529 	/* 2. update RTTVAR & SRTT */
2530 	/***************************/
2531 	/*-
2532 	 * Compute the scaled average lastsa and the
2533 	 * scaled variance lastsv as described in van Jacobson
2534 	 * Paper "Congestion Avoidance and Control", Annex A.
2535 	 *
2536 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2537 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2538 	 */
2539 	if (net->RTO_measured) {
2540 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2541 		net->lastsa += rtt;
2542 		if (rtt < 0) {
2543 			rtt = -rtt;
2544 		}
2545 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2546 		net->lastsv += rtt;
2547 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2548 			rto_logging(net, SCTP_LOG_RTTVAR);
2549 		}
2550 	} else {
2551 		/* First RTO measurment */
2552 		net->RTO_measured = 1;
2553 		first_measure = 1;
2554 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2555 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2556 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2557 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2558 		}
2559 	}
2560 	if (net->lastsv == 0) {
2561 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2562 	}
2563 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2564 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2565 	    (stcb->asoc.sat_network_lockout == 0)) {
2566 		stcb->asoc.sat_network = 1;
2567 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2568 		stcb->asoc.sat_network = 0;
2569 		stcb->asoc.sat_network_lockout = 1;
2570 	}
2571 	/* bound it, per C6/C7 in Section 5.3.1 */
2572 	if (new_rto < stcb->asoc.minrto) {
2573 		new_rto = stcb->asoc.minrto;
2574 	}
2575 	if (new_rto > stcb->asoc.maxrto) {
2576 		new_rto = stcb->asoc.maxrto;
2577 	}
2578 	/* we are now returning the RTO */
2579 	return (new_rto);
2580 }
2581 
2582 /*
2583  * return a pointer to a contiguous piece of data from the given mbuf chain
2584  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2585  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2586  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2587  */
2588 caddr_t
2589 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2590 {
2591 	uint32_t count;
2592 	uint8_t *ptr;
2593 
2594 	ptr = in_ptr;
2595 	if ((off < 0) || (len <= 0))
2596 		return (NULL);
2597 
2598 	/* find the desired start location */
2599 	while ((m != NULL) && (off > 0)) {
2600 		if (off < SCTP_BUF_LEN(m))
2601 			break;
2602 		off -= SCTP_BUF_LEN(m);
2603 		m = SCTP_BUF_NEXT(m);
2604 	}
2605 	if (m == NULL)
2606 		return (NULL);
2607 
2608 	/* is the current mbuf large enough (eg. contiguous)? */
2609 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2610 		return (mtod(m, caddr_t)+off);
2611 	} else {
2612 		/* else, it spans more than one mbuf, so save a temp copy... */
2613 		while ((m != NULL) && (len > 0)) {
2614 			count = min(SCTP_BUF_LEN(m) - off, len);
2615 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2616 			len -= count;
2617 			ptr += count;
2618 			off = 0;
2619 			m = SCTP_BUF_NEXT(m);
2620 		}
2621 		if ((m == NULL) && (len > 0))
2622 			return (NULL);
2623 		else
2624 			return ((caddr_t)in_ptr);
2625 	}
2626 }
2627 
2628 
2629 
2630 struct sctp_paramhdr *
2631 sctp_get_next_param(struct mbuf *m,
2632     int offset,
2633     struct sctp_paramhdr *pull,
2634     int pull_limit)
2635 {
2636 	/* This just provides a typed signature to Peter's Pull routine */
2637 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2638 	    (uint8_t *) pull));
2639 }
2640 
2641 
2642 int
2643 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2644 {
2645 	/*
2646 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2647 	 * padlen is > 3 this routine will fail.
2648 	 */
2649 	uint8_t *dp;
2650 	int i;
2651 
2652 	if (padlen > 3) {
2653 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2654 		return (ENOBUFS);
2655 	}
2656 	if (padlen <= M_TRAILINGSPACE(m)) {
2657 		/*
2658 		 * The easy way. We hope the majority of the time we hit
2659 		 * here :)
2660 		 */
2661 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2662 		SCTP_BUF_LEN(m) += padlen;
2663 	} else {
2664 		/* Hard way we must grow the mbuf */
2665 		struct mbuf *tmp;
2666 
2667 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2668 		if (tmp == NULL) {
2669 			/* Out of space GAK! we are in big trouble. */
2670 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2671 			return (ENOSPC);
2672 		}
2673 		/* setup and insert in middle */
2674 		SCTP_BUF_LEN(tmp) = padlen;
2675 		SCTP_BUF_NEXT(tmp) = NULL;
2676 		SCTP_BUF_NEXT(m) = tmp;
2677 		dp = mtod(tmp, uint8_t *);
2678 	}
2679 	/* zero out the pad */
2680 	for (i = 0; i < padlen; i++) {
2681 		*dp = 0;
2682 		dp++;
2683 	}
2684 	return (0);
2685 }
2686 
2687 int
2688 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2689 {
2690 	/* find the last mbuf in chain and pad it */
2691 	struct mbuf *m_at;
2692 
2693 	m_at = m;
2694 	if (last_mbuf) {
2695 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2696 	} else {
2697 		while (m_at) {
2698 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2699 				return (sctp_add_pad_tombuf(m_at, padval));
2700 			}
2701 			m_at = SCTP_BUF_NEXT(m_at);
2702 		}
2703 	}
2704 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2705 	return (EFAULT);
2706 }
2707 
/*
 * Build and queue an SCTP_ASSOC_CHANGE notification on the association's
 * read queue.  "event" is the new association state (SCTP_COMM_LOST,
 * SCTP_CANT_STR_ASSOC, ...) and "error" the cause reported to the user.
 * "data" is unused in this implementation.  For one-to-one style (or
 * connected one-to-many) sockets, COMM_LOST/CANT_STR_ASSOC also set
 * so_error and wake any sleepers so blocked callers fail promptly.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			/* Association never completed: connection refused. */
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			/* Established association was torn down. */
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: pin the assoc with a refcount, drop the
		 * TCB lock, take the socket lock, retake the TCB lock, then
		 * re-check that the socket was not closed underneath us.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		socantrcvmore(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	/* Build the notification message in a fresh mbuf. */
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* Wrap it in a read-queue entry and hand it to the socket. */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* Same lock-order dance as above; see comment there. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2825 
2826 static void
2827 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2828     struct sockaddr *sa, uint32_t error)
2829 {
2830 	struct mbuf *m_notify;
2831 	struct sctp_paddr_change *spc;
2832 	struct sctp_queued_to_read *control;
2833 
2834 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2835 		/* event not enabled */
2836 		return;
2837 	}
2838 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2839 	if (m_notify == NULL)
2840 		return;
2841 	SCTP_BUF_LEN(m_notify) = 0;
2842 	spc = mtod(m_notify, struct sctp_paddr_change *);
2843 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2844 	spc->spc_flags = 0;
2845 	spc->spc_length = sizeof(struct sctp_paddr_change);
2846 	switch (sa->sa_family) {
2847 #ifdef INET
2848 	case AF_INET:
2849 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2850 		break;
2851 #endif
2852 #ifdef INET6
2853 	case AF_INET6:
2854 		{
2855 			struct sockaddr_in6 *sin6;
2856 
2857 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2858 
2859 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2860 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2861 				if (sin6->sin6_scope_id == 0) {
2862 					/* recover scope_id for user */
2863 					(void)sa6_recoverscope(sin6);
2864 				} else {
2865 					/* clear embedded scope_id for user */
2866 					in6_clearscope(&sin6->sin6_addr);
2867 				}
2868 			}
2869 			break;
2870 		}
2871 #endif
2872 	default:
2873 		/* TSNH */
2874 		break;
2875 	}
2876 	spc->spc_state = state;
2877 	spc->spc_error = error;
2878 	spc->spc_assoc_id = sctp_get_associd(stcb);
2879 
2880 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2881 	SCTP_BUF_NEXT(m_notify) = NULL;
2882 
2883 	/* append to socket */
2884 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2885 	    0, 0, 0, 0, 0, 0,
2886 	    m_notify);
2887 	if (control == NULL) {
2888 		/* no memory */
2889 		sctp_m_freem(m_notify);
2890 		return;
2891 	}
2892 	control->length = SCTP_BUF_LEN(m_notify);
2893 	control->spec_flags = M_NOTIFICATION;
2894 	/* not that we need this */
2895 	control->tail_mbuf = m_notify;
2896 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2897 	    control,
2898 	    &stcb->sctp_socket->so_rcv, 1,
2899 	    SCTP_READ_LOCK_NOT_HELD,
2900 	    SCTP_SO_NOT_LOCKED);
2901 }
2902 
2903 
/*
 * Queue an SCTP_SEND_FAILED notification for a chunk that could not be
 * delivered.  "error" selects SCTP_DATA_UNSENT vs SCTP_DATA_SENT; the
 * chunk's user data (with the DATA chunk header trimmed off) is appended
 * to the notification, and ownership of chk->data transfers to it.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* Reported length covers the user payload, not the chunk header. */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* No room: drop m_notify (its chain includes the stolen data). */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2985 
2986 
/*
 * Queue an SCTP_SEND_FAILED notification for a stream-queue pending
 * message (data that never became a chunk).  "error" selects
 * SCTP_DATA_UNSENT vs SCTP_DATA_SENT; ownership of sp->data transfers to
 * the notification.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* Reported length covers the notification header plus the payload. */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* Part of the message was already moved to a chunk. */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* No room: drop m_notify (its chain includes the stolen data). */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3059 
3060 
3061 
3062 static void
3063 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3064     uint32_t error)
3065 {
3066 	struct mbuf *m_notify;
3067 	struct sctp_adaptation_event *sai;
3068 	struct sctp_queued_to_read *control;
3069 
3070 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3071 		/* event not enabled */
3072 		return;
3073 	}
3074 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3075 	if (m_notify == NULL)
3076 		/* no space left */
3077 		return;
3078 	SCTP_BUF_LEN(m_notify) = 0;
3079 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3080 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3081 	sai->sai_flags = 0;
3082 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3083 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3084 	sai->sai_assoc_id = sctp_get_associd(stcb);
3085 
3086 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3087 	SCTP_BUF_NEXT(m_notify) = NULL;
3088 
3089 	/* append to socket */
3090 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3091 	    0, 0, 0, 0, 0, 0,
3092 	    m_notify);
3093 	if (control == NULL) {
3094 		/* no memory */
3095 		sctp_m_freem(m_notify);
3096 		return;
3097 	}
3098 	control->length = SCTP_BUF_LEN(m_notify);
3099 	control->spec_flags = M_NOTIFICATION;
3100 	/* not that we need this */
3101 	control->tail_mbuf = m_notify;
3102 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3103 	    control,
3104 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3105 }
3106 
3107 /* This always must be called with the read-queue LOCKED in the INP */
3108 static void
3109 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3110     uint32_t val, int so_locked
3111 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3112     SCTP_UNUSED
3113 #endif
3114 )
3115 {
3116 	struct mbuf *m_notify;
3117 	struct sctp_pdapi_event *pdapi;
3118 	struct sctp_queued_to_read *control;
3119 	struct sockbuf *sb;
3120 
3121 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3122 		/* event not enabled */
3123 		return;
3124 	}
3125 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3126 		return;
3127 	}
3128 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3129 	if (m_notify == NULL)
3130 		/* no space left */
3131 		return;
3132 	SCTP_BUF_LEN(m_notify) = 0;
3133 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3134 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3135 	pdapi->pdapi_flags = 0;
3136 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3137 	pdapi->pdapi_indication = error;
3138 	pdapi->pdapi_stream = (val >> 16);
3139 	pdapi->pdapi_seq = (val & 0x0000ffff);
3140 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3141 
3142 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3143 	SCTP_BUF_NEXT(m_notify) = NULL;
3144 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3145 	    0, 0, 0, 0, 0, 0,
3146 	    m_notify);
3147 	if (control == NULL) {
3148 		/* no memory */
3149 		sctp_m_freem(m_notify);
3150 		return;
3151 	}
3152 	control->spec_flags = M_NOTIFICATION;
3153 	control->length = SCTP_BUF_LEN(m_notify);
3154 	/* not that we need this */
3155 	control->tail_mbuf = m_notify;
3156 	control->held_length = 0;
3157 	control->length = 0;
3158 	sb = &stcb->sctp_socket->so_rcv;
3159 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3160 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3161 	}
3162 	sctp_sballoc(stcb, sb, m_notify);
3163 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3164 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3165 	}
3166 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3167 	control->end_added = 1;
3168 	if (stcb->asoc.control_pdapi)
3169 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3170 	else {
3171 		/* we really should not see this case */
3172 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3173 	}
3174 	if (stcb->sctp_ep && stcb->sctp_socket) {
3175 		/* This should always be the case */
3176 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3177 		struct socket *so;
3178 
3179 		so = SCTP_INP_SO(stcb->sctp_ep);
3180 		if (!so_locked) {
3181 			atomic_add_int(&stcb->asoc.refcnt, 1);
3182 			SCTP_TCB_UNLOCK(stcb);
3183 			SCTP_SOCKET_LOCK(so, 1);
3184 			SCTP_TCB_LOCK(stcb);
3185 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3186 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3187 				SCTP_SOCKET_UNLOCK(so, 1);
3188 				return;
3189 			}
3190 		}
3191 #endif
3192 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3193 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3194 		if (!so_locked) {
3195 			SCTP_SOCKET_UNLOCK(so, 1);
3196 		}
3197 #endif
3198 	}
3199 }
3200 
/*
 * Deliver a SCTP_SHUTDOWN_EVENT notification.  For TCP-model sockets
 * (and one-to-many sockets in the TCP pool) the socket is additionally
 * marked as unable to send, waking up any writers.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Lock ordering: drop the TCB lock while acquiring the
		 * socket lock; hold a refcount so the assoc cannot be
		 * freed in the meantime.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* Socket was closed while we slept; bail out. */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3268 
3269 static void
3270 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3271     int so_locked
3272 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3273     SCTP_UNUSED
3274 #endif
3275 )
3276 {
3277 	struct mbuf *m_notify;
3278 	struct sctp_sender_dry_event *event;
3279 	struct sctp_queued_to_read *control;
3280 
3281 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3282 		/* event not enabled */
3283 		return;
3284 	}
3285 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3286 	if (m_notify == NULL) {
3287 		/* no space left */
3288 		return;
3289 	}
3290 	SCTP_BUF_LEN(m_notify) = 0;
3291 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3292 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3293 	event->sender_dry_flags = 0;
3294 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3295 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3296 
3297 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3298 	SCTP_BUF_NEXT(m_notify) = NULL;
3299 
3300 	/* append to socket */
3301 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3302 	    0, 0, 0, 0, 0, 0, m_notify);
3303 	if (control == NULL) {
3304 		/* no memory */
3305 		sctp_m_freem(m_notify);
3306 		return;
3307 	}
3308 	control->length = SCTP_BUF_LEN(m_notify);
3309 	control->spec_flags = M_NOTIFICATION;
3310 	/* not that we need this */
3311 	control->tail_mbuf = m_notify;
3312 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3313 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3314 }
3315 
3316 
3317 static void
3318 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3319 {
3320 	struct mbuf *m_notify;
3321 	struct sctp_queued_to_read *control;
3322 	struct sctp_stream_reset_event *strreset;
3323 	int len;
3324 
3325 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3326 		/* event not enabled */
3327 		return;
3328 	}
3329 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3330 	if (m_notify == NULL)
3331 		/* no space left */
3332 		return;
3333 	SCTP_BUF_LEN(m_notify) = 0;
3334 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3335 	if (len > M_TRAILINGSPACE(m_notify)) {
3336 		/* never enough room */
3337 		sctp_m_freem(m_notify);
3338 		return;
3339 	}
3340 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3341 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3342 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3343 	strreset->strreset_length = len;
3344 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3345 	strreset->strreset_list[0] = number_entries;
3346 
3347 	SCTP_BUF_LEN(m_notify) = len;
3348 	SCTP_BUF_NEXT(m_notify) = NULL;
3349 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3350 		/* no space */
3351 		sctp_m_freem(m_notify);
3352 		return;
3353 	}
3354 	/* append to socket */
3355 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3356 	    0, 0, 0, 0, 0, 0,
3357 	    m_notify);
3358 	if (control == NULL) {
3359 		/* no memory */
3360 		sctp_m_freem(m_notify);
3361 		return;
3362 	}
3363 	control->spec_flags = M_NOTIFICATION;
3364 	control->length = SCTP_BUF_LEN(m_notify);
3365 	/* not that we need this */
3366 	control->tail_mbuf = m_notify;
3367 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3368 	    control,
3369 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3370 }
3371 
3372 
3373 static void
3374 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3375     int number_entries, uint16_t * list, int flag)
3376 {
3377 	struct mbuf *m_notify;
3378 	struct sctp_queued_to_read *control;
3379 	struct sctp_stream_reset_event *strreset;
3380 	int len;
3381 
3382 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3383 		/* event not enabled */
3384 		return;
3385 	}
3386 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3387 	if (m_notify == NULL)
3388 		/* no space left */
3389 		return;
3390 	SCTP_BUF_LEN(m_notify) = 0;
3391 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3392 	if (len > M_TRAILINGSPACE(m_notify)) {
3393 		/* never enough room */
3394 		sctp_m_freem(m_notify);
3395 		return;
3396 	}
3397 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3398 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3399 	if (number_entries == 0) {
3400 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3401 	} else {
3402 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3403 	}
3404 	strreset->strreset_length = len;
3405 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3406 	if (number_entries) {
3407 		int i;
3408 
3409 		for (i = 0; i < number_entries; i++) {
3410 			strreset->strreset_list[i] = ntohs(list[i]);
3411 		}
3412 	}
3413 	SCTP_BUF_LEN(m_notify) = len;
3414 	SCTP_BUF_NEXT(m_notify) = NULL;
3415 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3416 		/* no space */
3417 		sctp_m_freem(m_notify);
3418 		return;
3419 	}
3420 	/* append to socket */
3421 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3422 	    0, 0, 0, 0, 0, 0,
3423 	    m_notify);
3424 	if (control == NULL) {
3425 		/* no memory */
3426 		sctp_m_freem(m_notify);
3427 		return;
3428 	}
3429 	control->spec_flags = M_NOTIFICATION;
3430 	control->length = SCTP_BUF_LEN(m_notify);
3431 	/* not that we need this */
3432 	control->tail_mbuf = m_notify;
3433 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3434 	    control,
3435 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3436 }
3437 
3438 
3439 void
3440 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3441     uint32_t error, void *data, int so_locked
3442 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3443     SCTP_UNUSED
3444 #endif
3445 )
3446 {
3447 	if ((stcb == NULL) ||
3448 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3449 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3450 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3451 		/* If the socket is gone we are out of here */
3452 		return;
3453 	}
3454 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3455 		return;
3456 	}
3457 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3458 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3459 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3460 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3461 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3462 			/* Don't report these in front states */
3463 			return;
3464 		}
3465 	}
3466 	switch (notification) {
3467 	case SCTP_NOTIFY_ASSOC_UP:
3468 		if (stcb->asoc.assoc_up_sent == 0) {
3469 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3470 			stcb->asoc.assoc_up_sent = 1;
3471 		}
3472 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3473 			sctp_notify_adaptation_layer(stcb, error);
3474 		}
3475 		if (stcb->asoc.peer_supports_auth == 0) {
3476 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3477 			    NULL, so_locked);
3478 		}
3479 		break;
3480 	case SCTP_NOTIFY_ASSOC_DOWN:
3481 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3482 		break;
3483 	case SCTP_NOTIFY_INTERFACE_DOWN:
3484 		{
3485 			struct sctp_nets *net;
3486 
3487 			net = (struct sctp_nets *)data;
3488 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3489 			    (struct sockaddr *)&net->ro._l_addr, error);
3490 			break;
3491 		}
3492 	case SCTP_NOTIFY_INTERFACE_UP:
3493 		{
3494 			struct sctp_nets *net;
3495 
3496 			net = (struct sctp_nets *)data;
3497 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3498 			    (struct sockaddr *)&net->ro._l_addr, error);
3499 			break;
3500 		}
3501 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3502 		{
3503 			struct sctp_nets *net;
3504 
3505 			net = (struct sctp_nets *)data;
3506 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3507 			    (struct sockaddr *)&net->ro._l_addr, error);
3508 			break;
3509 		}
3510 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3511 		sctp_notify_send_failed2(stcb, error,
3512 		    (struct sctp_stream_queue_pending *)data, so_locked);
3513 		break;
3514 	case SCTP_NOTIFY_DG_FAIL:
3515 		sctp_notify_send_failed(stcb, error,
3516 		    (struct sctp_tmit_chunk *)data, so_locked);
3517 		break;
3518 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3519 		{
3520 			uint32_t val;
3521 
3522 			val = *((uint32_t *) data);
3523 
3524 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3525 			break;
3526 		}
3527 	case SCTP_NOTIFY_STRDATA_ERR:
3528 		break;
3529 	case SCTP_NOTIFY_ASSOC_ABORTED:
3530 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3531 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3532 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3533 		} else {
3534 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3535 		}
3536 		break;
3537 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3538 		break;
3539 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3540 		break;
3541 	case SCTP_NOTIFY_ASSOC_RESTART:
3542 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3543 		if (stcb->asoc.peer_supports_auth == 0) {
3544 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3545 			    NULL, so_locked);
3546 		}
3547 		break;
3548 	case SCTP_NOTIFY_HB_RESP:
3549 		break;
3550 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3551 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3552 		break;
3553 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3554 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3555 		break;
3556 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3557 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3558 		break;
3559 
3560 	case SCTP_NOTIFY_STR_RESET_SEND:
3561 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3562 		break;
3563 	case SCTP_NOTIFY_STR_RESET_RECV:
3564 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3565 		break;
3566 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3567 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3568 		break;
3569 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3570 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3571 		break;
3572 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3573 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3574 		    error);
3575 		break;
3576 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3577 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3578 		    error);
3579 		break;
3580 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3581 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3582 		    error);
3583 		break;
3584 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3585 		break;
3586 	case SCTP_NOTIFY_ASCONF_FAILED:
3587 		break;
3588 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3589 		sctp_notify_shutdown_event(stcb);
3590 		break;
3591 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3592 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3593 		    (uint16_t) (uintptr_t) data,
3594 		    so_locked);
3595 		break;
3596 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3597 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3598 		    (uint16_t) (uintptr_t) data,
3599 		    so_locked);
3600 		break;
3601 	case SCTP_NOTIFY_NO_PEER_AUTH:
3602 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3603 		    (uint16_t) (uintptr_t) data,
3604 		    so_locked);
3605 		break;
3606 	case SCTP_NOTIFY_SENDER_DRY:
3607 		sctp_notify_sender_dry_event(stcb, so_locked);
3608 		break;
3609 	default:
3610 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3611 		    __FUNCTION__, notification, notification);
3612 		break;
3613 	}			/* end switch */
3614 }
3615 
/*
 * Report every chunk still queued for sending (sent queue, send queue
 * and each stream's output queue) as failed to the ULP, and free them.
 * Used when the association is being aborted/torn down.
 *
 * holds_lock: non-zero if the caller already holds the TCB send lock.
 * so_locked:  passed through to the notification helpers.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/*
			 * The notification may steal chk->data for the
			 * SEND_FAILED event; only free it if it is still
			 * attached afterwards.
			 */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* Same data-stealing caveat as above. */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/*
				 * SPECIAL_SP_FAIL may steal sp->data; free
				 * only if it is still attached afterwards.
				 */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3709 
3710 void
3711 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3712 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3713     SCTP_UNUSED
3714 #endif
3715 )
3716 {
3717 
3718 	if (stcb == NULL) {
3719 		return;
3720 	}
3721 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3722 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3723 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3724 		return;
3725 	}
3726 	/* Tell them we lost the asoc */
3727 	sctp_report_all_outbound(stcb, 1, so_locked);
3728 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3729 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3730 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3731 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3732 	}
3733 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3734 }
3735 
/*
 * Abort an association in response to an inbound packet: notify the
 * ULP (if we have a TCB), send an ABORT back to the peer using the
 * packet's addressing, and free the TCB.
 *
 * m/iphlen/sh describe the inbound packet that triggered the abort;
 * op_err is an optional error-cause chunk to include in the ABORT.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* vtag stays 0 (out-of-the-blue style ABORT) when no TCB exists. */
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock ordering: drop the TCB lock while acquiring the
		 * socket lock; hold a refcount so the assoc survives.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3779 
3780 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the per-association inbound and outbound TSN logs
 * (circular buffers of SCTP_TSN_LOG_SIZE entries) to the console.
 *
 * NOTE(review): the body is guarded by "NOSIY_PRINTS", which looks like
 * a misspelling of "NOISY_PRINTS" — with the current spelling this
 * function compiles to a no-op unless the misspelled macro is defined.
 * Left as-is since renaming it would change which build flag enables
 * the output.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* If wrapped, print the older half (from the write index up). */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	/* Then the newer half (from the start up to the write index). */
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3841 
3842 #endif
3843 
/*
 * Locally abort an existing association: notify the ULP, send an ABORT
 * chunk to the peer (with optional op_err cause), update statistics and
 * free the TCB.  With a NULL stcb it only finishes off an inp that is
 * pending deletion.
 *
 * so_locked indicates whether the caller already holds the socket lock
 * (relevant only for the __APPLE__/SCTP_SO_LOCK_TESTING builds).
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* Last assoc is gone; finish freeing the inp. */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock ordering: drop the TCB lock while acquiring the socket
	 * lock; hold a refcount so the assoc survives the window.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3906 
3907 void
3908 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3909     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3910 {
3911 	struct sctp_chunkhdr *ch, chunk_buf;
3912 	unsigned int chk_length;
3913 
3914 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3915 	/* Generate a TO address for future reference */
3916 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3917 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3918 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3919 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3920 		}
3921 	}
3922 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3923 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3924 	while (ch != NULL) {
3925 		chk_length = ntohs(ch->chunk_length);
3926 		if (chk_length < sizeof(*ch)) {
3927 			/* break to abort land */
3928 			break;
3929 		}
3930 		switch (ch->chunk_type) {
3931 		case SCTP_COOKIE_ECHO:
3932 			/* We hit here only if the assoc is being freed */
3933 			return;
3934 		case SCTP_PACKET_DROPPED:
3935 			/* we don't respond to pkt-dropped */
3936 			return;
3937 		case SCTP_ABORT_ASSOCIATION:
3938 			/* we don't respond with an ABORT to an ABORT */
3939 			return;
3940 		case SCTP_SHUTDOWN_COMPLETE:
3941 			/*
3942 			 * we ignore it since we are not waiting for it and
3943 			 * peer is gone
3944 			 */
3945 			return;
3946 		case SCTP_SHUTDOWN_ACK:
3947 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
3948 			return;
3949 		default:
3950 			break;
3951 		}
3952 		offset += SCTP_SIZE32(chk_length);
3953 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3954 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3955 	}
3956 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
3957 }
3958 
3959 /*
3960  * check the inbound datagram to make sure there is not an abort inside it,
3961  * if there is return 1, else return 0.
3962  */
3963 int
3964 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3965 {
3966 	struct sctp_chunkhdr *ch;
3967 	struct sctp_init_chunk *init_chk, chunk_buf;
3968 	int offset;
3969 	unsigned int chk_length;
3970 
3971 	offset = iphlen + sizeof(struct sctphdr);
3972 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3973 	    (uint8_t *) & chunk_buf);
3974 	while (ch != NULL) {
3975 		chk_length = ntohs(ch->chunk_length);
3976 		if (chk_length < sizeof(*ch)) {
3977 			/* packet is probably corrupt */
3978 			break;
3979 		}
3980 		/* we seem to be ok, is it an abort? */
3981 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
3982 			/* yep, tell them */
3983 			return (1);
3984 		}
3985 		if (ch->chunk_type == SCTP_INITIATION) {
3986 			/* need to update the Vtag */
3987 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
3988 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
3989 			if (init_chk != NULL) {
3990 				*vtagfill = ntohl(init_chk->init.initiate_tag);
3991 			}
3992 		}
3993 		/* Nope, move to the next chunk */
3994 		offset += SCTP_SIZE32(chk_length);
3995 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3996 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3997 	}
3998 	return (0);
3999 }
4000 
4001 /*
4002  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4003  * set (i.e. it's 0) so, create this function to compare link local scopes
4004  */
4005 #ifdef INET6
4006 uint32_t
4007 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4008 {
4009 	struct sockaddr_in6 a, b;
4010 
4011 	/* save copies */
4012 	a = *addr1;
4013 	b = *addr2;
4014 
4015 	if (a.sin6_scope_id == 0)
4016 		if (sa6_recoverscope(&a)) {
4017 			/* can't get scope, so can't match */
4018 			return (0);
4019 		}
4020 	if (b.sin6_scope_id == 0)
4021 		if (sa6_recoverscope(&b)) {
4022 			/* can't get scope, so can't match */
4023 			return (0);
4024 		}
4025 	if (a.sin6_scope_id != b.sin6_scope_id)
4026 		return (0);
4027 
4028 	return (1);
4029 }
4030 
4031 /*
4032  * returns a sockaddr_in6 with embedded scope recovered and removed
4033  */
4034 struct sockaddr_in6 *
4035 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4036 {
4037 	/* check and strip embedded scope junk */
4038 	if (addr->sin6_family == AF_INET6) {
4039 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4040 			if (addr->sin6_scope_id == 0) {
4041 				*store = *addr;
4042 				if (!sa6_recoverscope(store)) {
4043 					/* use the recovered scope */
4044 					addr = store;
4045 				}
4046 			} else {
4047 				/* else, return the original "to" addr */
4048 				in6_clearscope(&addr->sin6_addr);
4049 			}
4050 		}
4051 	}
4052 	return (addr);
4053 }
4054 
4055 #endif
4056 
4057 /*
4058  * are the two addresses the same?  currently a "scopeless" check returns: 1
4059  * if same, 0 if not
4060  */
4061 int
4062 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4063 {
4064 
4065 	/* must be valid */
4066 	if (sa1 == NULL || sa2 == NULL)
4067 		return (0);
4068 
4069 	/* must be the same family */
4070 	if (sa1->sa_family != sa2->sa_family)
4071 		return (0);
4072 
4073 	switch (sa1->sa_family) {
4074 #ifdef INET6
4075 	case AF_INET6:
4076 		{
4077 			/* IPv6 addresses */
4078 			struct sockaddr_in6 *sin6_1, *sin6_2;
4079 
4080 			sin6_1 = (struct sockaddr_in6 *)sa1;
4081 			sin6_2 = (struct sockaddr_in6 *)sa2;
4082 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4083 			    sin6_2));
4084 		}
4085 #endif
4086 #ifdef INET
4087 	case AF_INET:
4088 		{
4089 			/* IPv4 addresses */
4090 			struct sockaddr_in *sin_1, *sin_2;
4091 
4092 			sin_1 = (struct sockaddr_in *)sa1;
4093 			sin_2 = (struct sockaddr_in *)sa2;
4094 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4095 		}
4096 #endif
4097 	default:
4098 		/* we don't do these... */
4099 		return (0);
4100 	}
4101 }
4102 
4103 void
4104 sctp_print_address(struct sockaddr *sa)
4105 {
4106 #ifdef INET6
4107 	char ip6buf[INET6_ADDRSTRLEN];
4108 
4109 	ip6buf[0] = 0;
4110 #endif
4111 
4112 	switch (sa->sa_family) {
4113 #ifdef INET6
4114 	case AF_INET6:
4115 		{
4116 			struct sockaddr_in6 *sin6;
4117 
4118 			sin6 = (struct sockaddr_in6 *)sa;
4119 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4120 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4121 			    ntohs(sin6->sin6_port),
4122 			    sin6->sin6_scope_id);
4123 			break;
4124 		}
4125 #endif
4126 #ifdef INET
4127 	case AF_INET:
4128 		{
4129 			struct sockaddr_in *sin;
4130 			unsigned char *p;
4131 
4132 			sin = (struct sockaddr_in *)sa;
4133 			p = (unsigned char *)&sin->sin_addr;
4134 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4135 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4136 			break;
4137 		}
4138 #endif
4139 	default:
4140 		SCTP_PRINTF("?\n");
4141 		break;
4142 	}
4143 }
4144 
4145 void
4146 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4147 {
4148 	switch (iph->ip_v) {
4149 #ifdef INET
4150 	case IPVERSION:
4151 		{
4152 			struct sockaddr_in lsa, fsa;
4153 
4154 			bzero(&lsa, sizeof(lsa));
4155 			lsa.sin_len = sizeof(lsa);
4156 			lsa.sin_family = AF_INET;
4157 			lsa.sin_addr = iph->ip_src;
4158 			lsa.sin_port = sh->src_port;
4159 			bzero(&fsa, sizeof(fsa));
4160 			fsa.sin_len = sizeof(fsa);
4161 			fsa.sin_family = AF_INET;
4162 			fsa.sin_addr = iph->ip_dst;
4163 			fsa.sin_port = sh->dest_port;
4164 			SCTP_PRINTF("src: ");
4165 			sctp_print_address((struct sockaddr *)&lsa);
4166 			SCTP_PRINTF("dest: ");
4167 			sctp_print_address((struct sockaddr *)&fsa);
4168 			break;
4169 		}
4170 #endif
4171 #ifdef INET6
4172 	case IPV6_VERSION >> 4:
4173 		{
4174 			struct ip6_hdr *ip6;
4175 			struct sockaddr_in6 lsa6, fsa6;
4176 
4177 			ip6 = (struct ip6_hdr *)iph;
4178 			bzero(&lsa6, sizeof(lsa6));
4179 			lsa6.sin6_len = sizeof(lsa6);
4180 			lsa6.sin6_family = AF_INET6;
4181 			lsa6.sin6_addr = ip6->ip6_src;
4182 			lsa6.sin6_port = sh->src_port;
4183 			bzero(&fsa6, sizeof(fsa6));
4184 			fsa6.sin6_len = sizeof(fsa6);
4185 			fsa6.sin6_family = AF_INET6;
4186 			fsa6.sin6_addr = ip6->ip6_dst;
4187 			fsa6.sin6_port = sh->dest_port;
4188 			SCTP_PRINTF("src: ");
4189 			sctp_print_address((struct sockaddr *)&lsa6);
4190 			SCTP_PRINTF("dest: ");
4191 			sctp_print_address((struct sockaddr *)&fsa6);
4192 			break;
4193 		}
4194 #endif
4195 	default:
4196 		/* TSNH */
4197 		break;
4198 	}
4199 }
4200 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.  Used when an
	 * association is peeled off / accepted onto its own socket: any
	 * already-queued read data must follow the association.  The move
	 * is two-phase (old inp -> tmp_queue -> new inp) so both inp read
	 * locks are never held at once.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* serialize against any reader of the old socket buffer */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit the old socket buffer for each mbuf moved */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit the new socket buffer for each mbuf moved */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4276 
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.  On any early-out path that does not queue the control,
	 * this function owns and frees it (and its data).
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* socket can no longer be read from: discard the control */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications are not counted as received user data */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* prune zero-length mbufs and book the rest into the sockbuf */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		/* caller says the message is complete */
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* finally, wake up any reader waiting on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/* hold a ref across the lock juggling */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* socket vanished while unlocked */
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4398 
4399 
/*
 * Append mbuf chain m to an existing read-queue entry (partial delivery or
 * reassembly).  Returns 0 on success, -1 when the control is missing,
 * already complete, or m is empty.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* reader is gone; silently drop the append */
		SCTP_INP_READ_UNLOCK(inp);
		return 0;
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* prune zero-length mbufs; book the rest into the sockbuf (if any) */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake up any reader waiting on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/* hold a ref across the lock juggling */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4545 
4546 
4547 
4548 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4549  *************ALTERNATE ROUTING CODE
4550  */
4551 
4552 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4553  *************ALTERNATE ROUTING CODE
4554  */
4555 
4556 struct mbuf *
4557 sctp_generate_invmanparam(int err)
4558 {
4559 	/* Return a MBUF with a invalid mandatory parameter */
4560 	struct mbuf *m;
4561 
4562 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4563 	if (m) {
4564 		struct sctp_paramhdr *ph;
4565 
4566 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4567 		ph = mtod(m, struct sctp_paramhdr *);
4568 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4569 		ph->param_type = htons(err);
4570 	}
4571 	return (m);
4572 }
4573 
4574 #ifdef SCTP_MBCNT_LOGGING
4575 void
4576 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4577     struct sctp_tmit_chunk *tp1, int chk_cnt)
4578 {
4579 	if (tp1->data == NULL) {
4580 		return;
4581 	}
4582 	asoc->chunks_on_out_queue -= chk_cnt;
4583 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4584 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4585 		    asoc->total_output_queue_size,
4586 		    tp1->book_size,
4587 		    0,
4588 		    tp1->mbcnt);
4589 	}
4590 	if (asoc->total_output_queue_size >= tp1->book_size) {
4591 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4592 	} else {
4593 		asoc->total_output_queue_size = 0;
4594 	}
4595 
4596 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4597 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4598 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4599 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4600 		} else {
4601 			stcb->sctp_socket->so_snd.sb_cc = 0;
4602 
4603 		}
4604 	}
4605 }
4606 
4607 #endif
4608 
4609 int
4610 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4611     int reason, int so_locked
4612 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4613     SCTP_UNUSED
4614 #endif
4615 )
4616 {
4617 	struct sctp_stream_out *strq;
4618 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4619 	struct sctp_stream_queue_pending *sp;
4620 	uint16_t stream = 0, seq = 0;
4621 	uint8_t foundeom = 0;
4622 	int ret_sz = 0;
4623 	int notdone;
4624 	int do_wakeup_routine = 0;
4625 
4626 	stream = tp1->rec.data.stream_number;
4627 	seq = tp1->rec.data.stream_seq;
4628 	do {
4629 		ret_sz += tp1->book_size;
4630 		if (tp1->data != NULL) {
4631 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4632 				sctp_flight_size_decrease(tp1);
4633 				sctp_total_flight_decrease(stcb, tp1);
4634 			}
4635 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4636 			stcb->asoc.peers_rwnd += tp1->send_size;
4637 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4638 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4639 			if (tp1->data) {
4640 				sctp_m_freem(tp1->data);
4641 				tp1->data = NULL;
4642 			}
4643 			do_wakeup_routine = 1;
4644 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4645 				stcb->asoc.sent_queue_cnt_removeable--;
4646 			}
4647 		}
4648 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4649 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4650 		    SCTP_DATA_NOT_FRAG) {
4651 			/* not frag'ed we ae done   */
4652 			notdone = 0;
4653 			foundeom = 1;
4654 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4655 			/* end of frag, we are done */
4656 			notdone = 0;
4657 			foundeom = 1;
4658 		} else {
4659 			/*
4660 			 * Its a begin or middle piece, we must mark all of
4661 			 * it
4662 			 */
4663 			notdone = 1;
4664 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4665 		}
4666 	} while (tp1 && notdone);
4667 	if (foundeom == 0) {
4668 		/*
4669 		 * The multi-part message was scattered across the send and
4670 		 * sent queue.
4671 		 */
4672 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4673 			if ((tp1->rec.data.stream_number != stream) ||
4674 			    (tp1->rec.data.stream_seq != seq)) {
4675 				break;
4676 			}
4677 			/*
4678 			 * save to chk in case we have some on stream out
4679 			 * queue. If so and we have an un-transmitted one we
4680 			 * don't have to fudge the TSN.
4681 			 */
4682 			chk = tp1;
4683 			ret_sz += tp1->book_size;
4684 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4685 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4686 			if (tp1->data) {
4687 				sctp_m_freem(tp1->data);
4688 				tp1->data = NULL;
4689 			}
4690 			/* No flight involved here book the size to 0 */
4691 			tp1->book_size = 0;
4692 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4693 				foundeom = 1;
4694 			}
4695 			do_wakeup_routine = 1;
4696 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4697 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4698 			/*
4699 			 * on to the sent queue so we can wait for it to be
4700 			 * passed by.
4701 			 */
4702 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4703 			    sctp_next);
4704 			stcb->asoc.send_queue_cnt--;
4705 			stcb->asoc.sent_queue_cnt++;
4706 		}
4707 	}
4708 	if (foundeom == 0) {
4709 		/*
4710 		 * Still no eom found. That means there is stuff left on the
4711 		 * stream out queue.. yuck.
4712 		 */
4713 		strq = &stcb->asoc.strmout[stream];
4714 		SCTP_TCB_SEND_LOCK(stcb);
4715 		TAILQ_FOREACH(sp, &strq->outqueue, next) {
4716 			/* FIXME: Shouldn't this be a serial number check? */
4717 			if (sp->strseq > seq) {
4718 				break;
4719 			}
4720 			/* Check if its our SEQ */
4721 			if (sp->strseq == seq) {
4722 				sp->discard_rest = 1;
4723 				/*
4724 				 * We may need to put a chunk on the queue
4725 				 * that holds the TSN that would have been
4726 				 * sent with the LAST bit.
4727 				 */
4728 				if (chk == NULL) {
4729 					/* Yep, we have to */
4730 					sctp_alloc_a_chunk(stcb, chk);
4731 					if (chk == NULL) {
4732 						/*
4733 						 * we are hosed. All we can
4734 						 * do is nothing.. which
4735 						 * will cause an abort if
4736 						 * the peer is paying
4737 						 * attention.
4738 						 */
4739 						goto oh_well;
4740 					}
4741 					memset(chk, 0, sizeof(*chk));
4742 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4743 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4744 					chk->asoc = &stcb->asoc;
4745 					chk->rec.data.stream_seq = sp->strseq;
4746 					chk->rec.data.stream_number = sp->stream;
4747 					chk->rec.data.payloadtype = sp->ppid;
4748 					chk->rec.data.context = sp->context;
4749 					chk->flags = sp->act_flags;
4750 					if (sp->net)
4751 						chk->whoTo = sp->net;
4752 					else
4753 						chk->whoTo = stcb->asoc.primary_destination;
4754 					atomic_add_int(&chk->whoTo->ref_count, 1);
4755 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4756 					stcb->asoc.pr_sctp_cnt++;
4757 					chk->pr_sctp_on = 1;
4758 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4759 					stcb->asoc.sent_queue_cnt++;
4760 					stcb->asoc.pr_sctp_cnt++;
4761 				} else {
4762 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4763 				}
4764 		oh_well:
4765 				if (sp->data) {
4766 					/*
4767 					 * Pull any data to free up the SB
4768 					 * and allow sender to "add more"
4769 					 * whilc we will throw away :-)
4770 					 */
4771 					sctp_free_spbufspace(stcb, &stcb->asoc,
4772 					    sp);
4773 					ret_sz += sp->length;
4774 					do_wakeup_routine = 1;
4775 					sp->some_taken = 1;
4776 					sctp_m_freem(sp->data);
4777 					sp->length = 0;
4778 					sp->data = NULL;
4779 					sp->tail_mbuf = NULL;
4780 				}
4781 				break;
4782 			}
4783 		}		/* End tailq_foreach */
4784 		SCTP_TCB_SEND_UNLOCK(stcb);
4785 	}
4786 	if (do_wakeup_routine) {
4787 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4788 		struct socket *so;
4789 
4790 		so = SCTP_INP_SO(stcb->sctp_ep);
4791 		if (!so_locked) {
4792 			atomic_add_int(&stcb->asoc.refcnt, 1);
4793 			SCTP_TCB_UNLOCK(stcb);
4794 			SCTP_SOCKET_LOCK(so, 1);
4795 			SCTP_TCB_LOCK(stcb);
4796 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4797 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4798 				/* assoc was freed while we were unlocked */
4799 				SCTP_SOCKET_UNLOCK(so, 1);
4800 				return (ret_sz);
4801 			}
4802 		}
4803 #endif
4804 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4805 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4806 		if (!so_locked) {
4807 			SCTP_SOCKET_UNLOCK(so, 1);
4808 		}
4809 #endif
4810 	}
4811 	return (ret_sz);
4812 }
4813 
4814 /*
4815  * checks to see if the given address, sa, is one that is currently known by
4816  * the kernel note: can't distinguish the same address on multiple interfaces
4817  * and doesn't handle multiple addresses with different zone/scope id's note:
4818  * ifa_ifwithaddr() compares the entire sockaddr struct
4819  */
4820 struct sctp_ifa *
4821 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4822     int holds_lock)
4823 {
4824 	struct sctp_laddr *laddr;
4825 
4826 	if (holds_lock == 0) {
4827 		SCTP_INP_RLOCK(inp);
4828 	}
4829 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4830 		if (laddr->ifa == NULL)
4831 			continue;
4832 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4833 			continue;
4834 #ifdef INET
4835 		if (addr->sa_family == AF_INET) {
4836 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4837 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4838 				/* found him. */
4839 				if (holds_lock == 0) {
4840 					SCTP_INP_RUNLOCK(inp);
4841 				}
4842 				return (laddr->ifa);
4843 				break;
4844 			}
4845 		}
4846 #endif
4847 #ifdef INET6
4848 		if (addr->sa_family == AF_INET6) {
4849 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4850 			    &laddr->ifa->address.sin6)) {
4851 				/* found him. */
4852 				if (holds_lock == 0) {
4853 					SCTP_INP_RUNLOCK(inp);
4854 				}
4855 				return (laddr->ifa);
4856 				break;
4857 			}
4858 		}
4859 #endif
4860 	}
4861 	if (holds_lock == 0) {
4862 		SCTP_INP_RUNLOCK(inp);
4863 	}
4864 	return (NULL);
4865 }
4866 
/*
 * Compute the address-hash bucket value for addr.  Returns 0 for address
 * families we do not hash.
 *
 * Fix: the IPv6 arm used "case INET6:" — INET6 is the kernel option macro
 * (expands to 1), not the address family constant — so every IPv6 address
 * fell through to the default and hashed to 0, collapsing the vrf address
 * hash into a single bucket.  It must be "case AF_INET6:".
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			/* fold the upper half into the lower half */
			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t hash_of_addr;

			sin6 = (struct sockaddr_in6 *)addr;
			/* sum the four 32-bit words, then fold */
			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3]);
			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
			return (hash_of_addr);
		}
#endif
	default:
		break;
	}
	return (0);
}
4900 
4901 struct sctp_ifa *
4902 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4903 {
4904 	struct sctp_ifa *sctp_ifap;
4905 	struct sctp_vrf *vrf;
4906 	struct sctp_ifalist *hash_head;
4907 	uint32_t hash_of_addr;
4908 
4909 	if (holds_lock == 0)
4910 		SCTP_IPI_ADDR_RLOCK();
4911 
4912 	vrf = sctp_find_vrf(vrf_id);
4913 	if (vrf == NULL) {
4914 stage_right:
4915 		if (holds_lock == 0)
4916 			SCTP_IPI_ADDR_RUNLOCK();
4917 		return (NULL);
4918 	}
4919 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4920 
4921 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4922 	if (hash_head == NULL) {
4923 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4924 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4925 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4926 		sctp_print_address(addr);
4927 		SCTP_PRINTF("No such bucket for address\n");
4928 		if (holds_lock == 0)
4929 			SCTP_IPI_ADDR_RUNLOCK();
4930 
4931 		return (NULL);
4932 	}
4933 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4934 		if (sctp_ifap == NULL) {
4935 #ifdef INVARIANTS
4936 			panic("Huh LIST_FOREACH corrupt");
4937 			goto stage_right;
4938 #else
4939 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4940 			goto stage_right;
4941 #endif
4942 		}
4943 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4944 			continue;
4945 #ifdef INET
4946 		if (addr->sa_family == AF_INET) {
4947 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4948 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4949 				/* found him. */
4950 				if (holds_lock == 0)
4951 					SCTP_IPI_ADDR_RUNLOCK();
4952 				return (sctp_ifap);
4953 				break;
4954 			}
4955 		}
4956 #endif
4957 #ifdef INET6
4958 		if (addr->sa_family == AF_INET6) {
4959 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4960 			    &sctp_ifap->address.sin6)) {
4961 				/* found him. */
4962 				if (holds_lock == 0)
4963 					SCTP_IPI_ADDR_RUNLOCK();
4964 				return (sctp_ifap);
4965 				break;
4966 			}
4967 		}
4968 #endif
4969 	}
4970 	if (holds_lock == 0)
4971 		SCTP_IPI_ADDR_RUNLOCK();
4972 	return (NULL);
4973 }
4974 
/*
 * sctp_user_rcvd() - called after the user has pulled data off the
 * receive queue.  Decides whether the receive window has opened up
 * enough (by at least rwnd_req bytes) to be worth telling the peer
 * about, and if so sends a window-update SACK immediately.
 *
 * stcb         - the association the data was read from (may be NULL).
 * freed_so_far - in/out: bytes freed by the reader since the last
 *                accounting; zeroed here once folded into the tcb.
 * hold_rlock   - non-zero if the caller holds the INP read-queue lock;
 *                we must drop it before taking the TCB lock (lock
 *                ordering) and reacquire it before returning.
 * rwnd_req     - threshold (bytes) the window must have grown by
 *                before a window-update SACK is justified.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/*
	 * Pin the association so it cannot be freed out from under us
	 * while we operate without the TCB lock.  Released at no_lock.
	 */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Also pin the endpoint; released at out. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/*
	 * Fold the caller's freed-byte count into the per-tcb running
	 * total, then clear it so the caller starts a fresh count.
	 */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		/* dif = how much the window grew since we last told the peer */
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		/* Window shrank; nothing worth reporting. */
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Worth a window update.  Drop the read-queue lock first:
		 * the TCB lock must not be taken while holding it.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: freeing may have started. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		/* Send the window-update SACK and flush any queued output. */
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/*
	 * Restore the read-queue lock if we dropped it above, so the
	 * caller's hold_rlock state is still accurate on return.
	 */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Drop the association refcount pin taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5056 
5057 int
5058 sctp_sorecvmsg(struct socket *so,
5059     struct uio *uio,
5060     struct mbuf **mp,
5061     struct sockaddr *from,
5062     int fromlen,
5063     int *msg_flags,
5064     struct sctp_sndrcvinfo *sinfo,
5065     int filling_sinfo)
5066 {
5067 	/*
5068 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5069 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5070 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5071 	 * On the way out we may send out any combination of:
5072 	 * MSG_NOTIFICATION MSG_EOR
5073 	 *
5074 	 */
5075 	struct sctp_inpcb *inp = NULL;
5076 	int my_len = 0;
5077 	int cp_len = 0, error = 0;
5078 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5079 	struct mbuf *m = NULL;
5080 	struct sctp_tcb *stcb = NULL;
5081 	int wakeup_read_socket = 0;
5082 	int freecnt_applied = 0;
5083 	int out_flags = 0, in_flags = 0;
5084 	int block_allowed = 1;
5085 	uint32_t freed_so_far = 0;
5086 	uint32_t copied_so_far = 0;
5087 	int in_eeor_mode = 0;
5088 	int no_rcv_needed = 0;
5089 	uint32_t rwnd_req = 0;
5090 	int hold_sblock = 0;
5091 	int hold_rlock = 0;
5092 	int slen = 0;
5093 	uint32_t held_length = 0;
5094 	int sockbuf_lock = 0;
5095 
5096 	if (uio == NULL) {
5097 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5098 		return (EINVAL);
5099 	}
5100 	if (msg_flags) {
5101 		in_flags = *msg_flags;
5102 		if (in_flags & MSG_PEEK)
5103 			SCTP_STAT_INCR(sctps_read_peeks);
5104 	} else {
5105 		in_flags = 0;
5106 	}
5107 	slen = uio->uio_resid;
5108 
5109 	/* Pull in and set up our int flags */
5110 	if (in_flags & MSG_OOB) {
5111 		/* Out of band's NOT supported */
5112 		return (EOPNOTSUPP);
5113 	}
5114 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5115 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5116 		return (EINVAL);
5117 	}
5118 	if ((in_flags & (MSG_DONTWAIT
5119 	    | MSG_NBIO
5120 	    )) ||
5121 	    SCTP_SO_IS_NBIO(so)) {
5122 		block_allowed = 0;
5123 	}
5124 	/* setup the endpoint */
5125 	inp = (struct sctp_inpcb *)so->so_pcb;
5126 	if (inp == NULL) {
5127 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5128 		return (EFAULT);
5129 	}
5130 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5131 	/* Must be at least a MTU's worth */
5132 	if (rwnd_req < SCTP_MIN_RWND)
5133 		rwnd_req = SCTP_MIN_RWND;
5134 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5135 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5136 		sctp_misc_ints(SCTP_SORECV_ENTER,
5137 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5138 	}
5139 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5140 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5141 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5142 	}
5143 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5144 	sockbuf_lock = 1;
5145 	if (error) {
5146 		goto release_unlocked;
5147 	}
5148 restart:
5149 
5150 
5151 restart_nosblocks:
5152 	if (hold_sblock == 0) {
5153 		SOCKBUF_LOCK(&so->so_rcv);
5154 		hold_sblock = 1;
5155 	}
5156 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5157 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5158 		goto out;
5159 	}
5160 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5161 		if (so->so_error) {
5162 			error = so->so_error;
5163 			if ((in_flags & MSG_PEEK) == 0)
5164 				so->so_error = 0;
5165 			goto out;
5166 		} else {
5167 			if (so->so_rcv.sb_cc == 0) {
5168 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5169 				/* indicate EOF */
5170 				error = 0;
5171 				goto out;
5172 			}
5173 		}
5174 	}
5175 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5176 		/* we need to wait for data */
5177 		if ((so->so_rcv.sb_cc == 0) &&
5178 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5179 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5180 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5181 				/*
5182 				 * For active open side clear flags for
5183 				 * re-use passive open is blocked by
5184 				 * connect.
5185 				 */
5186 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5187 					/*
5188 					 * You were aborted, passive side
5189 					 * always hits here
5190 					 */
5191 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5192 					error = ECONNRESET;
5193 					/*
5194 					 * You get this once if you are
5195 					 * active open side
5196 					 */
5197 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5198 						/*
5199 						 * Remove flag if on the
5200 						 * active open side
5201 						 */
5202 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5203 					}
5204 				}
5205 				so->so_state &= ~(SS_ISCONNECTING |
5206 				    SS_ISDISCONNECTING |
5207 				    SS_ISCONFIRMING |
5208 				    SS_ISCONNECTED);
5209 				if (error == 0) {
5210 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5211 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5212 						error = ENOTCONN;
5213 					} else {
5214 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5215 					}
5216 				}
5217 				goto out;
5218 			}
5219 		}
5220 		error = sbwait(&so->so_rcv);
5221 		if (error) {
5222 			goto out;
5223 		}
5224 		held_length = 0;
5225 		goto restart_nosblocks;
5226 	} else if (so->so_rcv.sb_cc == 0) {
5227 		if (so->so_error) {
5228 			error = so->so_error;
5229 			if ((in_flags & MSG_PEEK) == 0)
5230 				so->so_error = 0;
5231 		} else {
5232 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5233 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5234 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5235 					/*
5236 					 * For active open side clear flags
5237 					 * for re-use passive open is
5238 					 * blocked by connect.
5239 					 */
5240 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5241 						/*
5242 						 * You were aborted, passive
5243 						 * side always hits here
5244 						 */
5245 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5246 						error = ECONNRESET;
5247 						/*
5248 						 * You get this once if you
5249 						 * are active open side
5250 						 */
5251 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5252 							/*
5253 							 * Remove flag if on
5254 							 * the active open
5255 							 * side
5256 							 */
5257 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5258 						}
5259 					}
5260 					so->so_state &= ~(SS_ISCONNECTING |
5261 					    SS_ISDISCONNECTING |
5262 					    SS_ISCONFIRMING |
5263 					    SS_ISCONNECTED);
5264 					if (error == 0) {
5265 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5266 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5267 							error = ENOTCONN;
5268 						} else {
5269 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5270 						}
5271 					}
5272 					goto out;
5273 				}
5274 			}
5275 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5276 			error = EWOULDBLOCK;
5277 		}
5278 		goto out;
5279 	}
5280 	if (hold_sblock == 1) {
5281 		SOCKBUF_UNLOCK(&so->so_rcv);
5282 		hold_sblock = 0;
5283 	}
5284 	/* we possibly have data we can read */
5285 	/* sa_ignore FREED_MEMORY */
5286 	control = TAILQ_FIRST(&inp->read_queue);
5287 	if (control == NULL) {
5288 		/*
5289 		 * This could be happening since the appender did the
5290 		 * increment but as not yet did the tailq insert onto the
5291 		 * read_queue
5292 		 */
5293 		if (hold_rlock == 0) {
5294 			SCTP_INP_READ_LOCK(inp);
5295 			hold_rlock = 1;
5296 		}
5297 		control = TAILQ_FIRST(&inp->read_queue);
5298 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5299 #ifdef INVARIANTS
5300 			panic("Huh, its non zero and nothing on control?");
5301 #endif
5302 			so->so_rcv.sb_cc = 0;
5303 		}
5304 		SCTP_INP_READ_UNLOCK(inp);
5305 		hold_rlock = 0;
5306 		goto restart;
5307 	}
5308 	if ((control->length == 0) &&
5309 	    (control->do_not_ref_stcb)) {
5310 		/*
5311 		 * Clean up code for freeing assoc that left behind a
5312 		 * pdapi.. maybe a peer in EEOR that just closed after
5313 		 * sending and never indicated a EOR.
5314 		 */
5315 		if (hold_rlock == 0) {
5316 			hold_rlock = 1;
5317 			SCTP_INP_READ_LOCK(inp);
5318 		}
5319 		control->held_length = 0;
5320 		if (control->data) {
5321 			/* Hmm there is data here .. fix */
5322 			struct mbuf *m_tmp;
5323 			int cnt = 0;
5324 
5325 			m_tmp = control->data;
5326 			while (m_tmp) {
5327 				cnt += SCTP_BUF_LEN(m_tmp);
5328 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5329 					control->tail_mbuf = m_tmp;
5330 					control->end_added = 1;
5331 				}
5332 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5333 			}
5334 			control->length = cnt;
5335 		} else {
5336 			/* remove it */
5337 			TAILQ_REMOVE(&inp->read_queue, control, next);
5338 			/* Add back any hiddend data */
5339 			sctp_free_remote_addr(control->whoFrom);
5340 			sctp_free_a_readq(stcb, control);
5341 		}
5342 		if (hold_rlock) {
5343 			hold_rlock = 0;
5344 			SCTP_INP_READ_UNLOCK(inp);
5345 		}
5346 		goto restart;
5347 	}
5348 	if ((control->length == 0) &&
5349 	    (control->end_added == 1)) {
5350 		/*
5351 		 * Do we also need to check for (control->pdapi_aborted ==
5352 		 * 1)?
5353 		 */
5354 		if (hold_rlock == 0) {
5355 			hold_rlock = 1;
5356 			SCTP_INP_READ_LOCK(inp);
5357 		}
5358 		TAILQ_REMOVE(&inp->read_queue, control, next);
5359 		if (control->data) {
5360 #ifdef INVARIANTS
5361 			panic("control->data not null but control->length == 0");
5362 #else
5363 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5364 			sctp_m_freem(control->data);
5365 			control->data = NULL;
5366 #endif
5367 		}
5368 		if (control->aux_data) {
5369 			sctp_m_free(control->aux_data);
5370 			control->aux_data = NULL;
5371 		}
5372 		sctp_free_remote_addr(control->whoFrom);
5373 		sctp_free_a_readq(stcb, control);
5374 		if (hold_rlock) {
5375 			hold_rlock = 0;
5376 			SCTP_INP_READ_UNLOCK(inp);
5377 		}
5378 		goto restart;
5379 	}
5380 	if (control->length == 0) {
5381 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5382 		    (filling_sinfo)) {
5383 			/* find a more suitable one then this */
5384 			ctl = TAILQ_NEXT(control, next);
5385 			while (ctl) {
5386 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5387 				    (ctl->some_taken ||
5388 				    (ctl->spec_flags & M_NOTIFICATION) ||
5389 				    ((ctl->do_not_ref_stcb == 0) &&
5390 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5391 				    ) {
5392 					/*-
5393 					 * If we have a different TCB next, and there is data
5394 					 * present. If we have already taken some (pdapi), OR we can
5395 					 * ref the tcb and no delivery as started on this stream, we
5396 					 * take it. Note we allow a notification on a different
5397 					 * assoc to be delivered..
5398 					 */
5399 					control = ctl;
5400 					goto found_one;
5401 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5402 					    (ctl->length) &&
5403 					    ((ctl->some_taken) ||
5404 					    ((ctl->do_not_ref_stcb == 0) &&
5405 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5406 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5407 					/*-
5408 					 * If we have the same tcb, and there is data present, and we
5409 					 * have the strm interleave feature present. Then if we have
5410 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5411 					 * not started a delivery for this stream, we can take it.
5412 					 * Note we do NOT allow a notificaiton on the same assoc to
5413 					 * be delivered.
5414 					 */
5415 					control = ctl;
5416 					goto found_one;
5417 				}
5418 				ctl = TAILQ_NEXT(ctl, next);
5419 			}
5420 		}
5421 		/*
5422 		 * if we reach here, not suitable replacement is available
5423 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5424 		 * into the our held count, and its time to sleep again.
5425 		 */
5426 		held_length = so->so_rcv.sb_cc;
5427 		control->held_length = so->so_rcv.sb_cc;
5428 		goto restart;
5429 	}
5430 	/* Clear the held length since there is something to read */
5431 	control->held_length = 0;
5432 	if (hold_rlock) {
5433 		SCTP_INP_READ_UNLOCK(inp);
5434 		hold_rlock = 0;
5435 	}
5436 found_one:
5437 	/*
5438 	 * If we reach here, control has a some data for us to read off.
5439 	 * Note that stcb COULD be NULL.
5440 	 */
5441 	control->some_taken++;
5442 	if (hold_sblock) {
5443 		SOCKBUF_UNLOCK(&so->so_rcv);
5444 		hold_sblock = 0;
5445 	}
5446 	stcb = control->stcb;
5447 	if (stcb) {
5448 		if ((control->do_not_ref_stcb == 0) &&
5449 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5450 			if (freecnt_applied == 0)
5451 				stcb = NULL;
5452 		} else if (control->do_not_ref_stcb == 0) {
5453 			/* you can't free it on me please */
5454 			/*
5455 			 * The lock on the socket buffer protects us so the
5456 			 * free code will stop. But since we used the
5457 			 * socketbuf lock and the sender uses the tcb_lock
5458 			 * to increment, we need to use the atomic add to
5459 			 * the refcnt
5460 			 */
5461 			if (freecnt_applied) {
5462 #ifdef INVARIANTS
5463 				panic("refcnt already incremented");
5464 #else
5465 				printf("refcnt already incremented?\n");
5466 #endif
5467 			} else {
5468 				atomic_add_int(&stcb->asoc.refcnt, 1);
5469 				freecnt_applied = 1;
5470 			}
5471 			/*
5472 			 * Setup to remember how much we have not yet told
5473 			 * the peer our rwnd has opened up. Note we grab the
5474 			 * value from the tcb from last time. Note too that
5475 			 * sack sending clears this when a sack is sent,
5476 			 * which is fine. Once we hit the rwnd_req, we then
5477 			 * will go to the sctp_user_rcvd() that will not
5478 			 * lock until it KNOWs it MUST send a WUP-SACK.
5479 			 */
5480 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5481 			stcb->freed_by_sorcv_sincelast = 0;
5482 		}
5483 	}
5484 	if (stcb &&
5485 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5486 	    control->do_not_ref_stcb == 0) {
5487 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5488 	}
5489 	/* First lets get off the sinfo and sockaddr info */
5490 	if ((sinfo) && filling_sinfo) {
5491 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5492 		nxt = TAILQ_NEXT(control, next);
5493 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5494 			struct sctp_extrcvinfo *s_extra;
5495 
5496 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5497 			if ((nxt) &&
5498 			    (nxt->length)) {
5499 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5500 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5501 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5502 				}
5503 				if (nxt->spec_flags & M_NOTIFICATION) {
5504 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5505 				}
5506 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5507 				s_extra->sreinfo_next_length = nxt->length;
5508 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5509 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5510 				if (nxt->tail_mbuf != NULL) {
5511 					if (nxt->end_added) {
5512 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5513 					}
5514 				}
5515 			} else {
5516 				/*
5517 				 * we explicitly 0 this, since the memcpy
5518 				 * got some other things beyond the older
5519 				 * sinfo_ that is on the control's structure
5520 				 * :-D
5521 				 */
5522 				nxt = NULL;
5523 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5524 				s_extra->sreinfo_next_aid = 0;
5525 				s_extra->sreinfo_next_length = 0;
5526 				s_extra->sreinfo_next_ppid = 0;
5527 				s_extra->sreinfo_next_stream = 0;
5528 			}
5529 		}
5530 		/*
5531 		 * update off the real current cum-ack, if we have an stcb.
5532 		 */
5533 		if ((control->do_not_ref_stcb == 0) && stcb)
5534 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5535 		/*
5536 		 * mask off the high bits, we keep the actual chunk bits in
5537 		 * there.
5538 		 */
5539 		sinfo->sinfo_flags &= 0x00ff;
5540 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5541 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5542 		}
5543 	}
5544 #ifdef SCTP_ASOCLOG_OF_TSNS
5545 	{
5546 		int index, newindex;
5547 		struct sctp_pcbtsn_rlog *entry;
5548 
5549 		do {
5550 			index = inp->readlog_index;
5551 			newindex = index + 1;
5552 			if (newindex >= SCTP_READ_LOG_SIZE) {
5553 				newindex = 0;
5554 			}
5555 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5556 		entry = &inp->readlog[index];
5557 		entry->vtag = control->sinfo_assoc_id;
5558 		entry->strm = control->sinfo_stream;
5559 		entry->seq = control->sinfo_ssn;
5560 		entry->sz = control->length;
5561 		entry->flgs = control->sinfo_flags;
5562 	}
5563 #endif
5564 	if (fromlen && from) {
5565 		struct sockaddr *to;
5566 
5567 #ifdef INET
5568 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5569 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5570 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5571 #else
5572 		/* No AF_INET use AF_INET6 */
5573 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5574 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5575 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5576 #endif
5577 
5578 		to = from;
5579 #if defined(INET) && defined(INET6)
5580 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5581 		    (to->sa_family == AF_INET) &&
5582 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5583 			struct sockaddr_in *sin;
5584 			struct sockaddr_in6 sin6;
5585 
5586 			sin = (struct sockaddr_in *)to;
5587 			bzero(&sin6, sizeof(sin6));
5588 			sin6.sin6_family = AF_INET6;
5589 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5590 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5591 			bcopy(&sin->sin_addr,
5592 			    &sin6.sin6_addr.s6_addr32[3],
5593 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5594 			sin6.sin6_port = sin->sin_port;
5595 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5596 		}
5597 #endif
5598 #if defined(INET6)
5599 		{
5600 			struct sockaddr_in6 lsa6, *to6;
5601 
5602 			to6 = (struct sockaddr_in6 *)to;
5603 			sctp_recover_scope_mac(to6, (&lsa6));
5604 		}
5605 #endif
5606 	}
5607 	/* now copy out what data we can */
5608 	if (mp == NULL) {
5609 		/* copy out each mbuf in the chain up to length */
5610 get_more_data:
5611 		m = control->data;
5612 		while (m) {
5613 			/* Move out all we can */
5614 			cp_len = (int)uio->uio_resid;
5615 			my_len = (int)SCTP_BUF_LEN(m);
5616 			if (cp_len > my_len) {
5617 				/* not enough in this buf */
5618 				cp_len = my_len;
5619 			}
5620 			if (hold_rlock) {
5621 				SCTP_INP_READ_UNLOCK(inp);
5622 				hold_rlock = 0;
5623 			}
5624 			if (cp_len > 0)
5625 				error = uiomove(mtod(m, char *), cp_len, uio);
5626 			/* re-read */
5627 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5628 				goto release;
5629 			}
5630 			if ((control->do_not_ref_stcb == 0) && stcb &&
5631 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5632 				no_rcv_needed = 1;
5633 			}
5634 			if (error) {
5635 				/* error we are out of here */
5636 				goto release;
5637 			}
5638 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5639 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5640 			    ((control->end_added == 0) ||
5641 			    (control->end_added &&
5642 			    (TAILQ_NEXT(control, next) == NULL)))
5643 			    ) {
5644 				SCTP_INP_READ_LOCK(inp);
5645 				hold_rlock = 1;
5646 			}
5647 			if (cp_len == SCTP_BUF_LEN(m)) {
5648 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5649 				    (control->end_added)) {
5650 					out_flags |= MSG_EOR;
5651 					if ((control->do_not_ref_stcb == 0) &&
5652 					    (control->stcb != NULL) &&
5653 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5654 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5655 				}
5656 				if (control->spec_flags & M_NOTIFICATION) {
5657 					out_flags |= MSG_NOTIFICATION;
5658 				}
5659 				/* we ate up the mbuf */
5660 				if (in_flags & MSG_PEEK) {
5661 					/* just looking */
5662 					m = SCTP_BUF_NEXT(m);
5663 					copied_so_far += cp_len;
5664 				} else {
5665 					/* dispose of the mbuf */
5666 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5667 						sctp_sblog(&so->so_rcv,
5668 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5669 					}
5670 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5671 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5672 						sctp_sblog(&so->so_rcv,
5673 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5674 					}
5675 					copied_so_far += cp_len;
5676 					freed_so_far += cp_len;
5677 					freed_so_far += MSIZE;
5678 					atomic_subtract_int(&control->length, cp_len);
5679 					control->data = sctp_m_free(m);
5680 					m = control->data;
5681 					/*
5682 					 * been through it all, must hold sb
5683 					 * lock ok to null tail
5684 					 */
5685 					if (control->data == NULL) {
5686 #ifdef INVARIANTS
5687 						if ((control->end_added == 0) ||
5688 						    (TAILQ_NEXT(control, next) == NULL)) {
5689 							/*
5690 							 * If the end is not
5691 							 * added, OR the
5692 							 * next is NOT null
5693 							 * we MUST have the
5694 							 * lock.
5695 							 */
5696 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5697 								panic("Hmm we don't own the lock?");
5698 							}
5699 						}
5700 #endif
5701 						control->tail_mbuf = NULL;
5702 #ifdef INVARIANTS
5703 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5704 							panic("end_added, nothing left and no MSG_EOR");
5705 						}
5706 #endif
5707 					}
5708 				}
5709 			} else {
5710 				/* Do we need to trim the mbuf? */
5711 				if (control->spec_flags & M_NOTIFICATION) {
5712 					out_flags |= MSG_NOTIFICATION;
5713 				}
5714 				if ((in_flags & MSG_PEEK) == 0) {
5715 					SCTP_BUF_RESV_UF(m, cp_len);
5716 					SCTP_BUF_LEN(m) -= cp_len;
5717 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5718 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5719 					}
5720 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5721 					if ((control->do_not_ref_stcb == 0) &&
5722 					    stcb) {
5723 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5724 					}
5725 					copied_so_far += cp_len;
5726 					freed_so_far += cp_len;
5727 					freed_so_far += MSIZE;
5728 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5729 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5730 						    SCTP_LOG_SBRESULT, 0);
5731 					}
5732 					atomic_subtract_int(&control->length, cp_len);
5733 				} else {
5734 					copied_so_far += cp_len;
5735 				}
5736 			}
5737 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5738 				break;
5739 			}
5740 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5741 			    (control->do_not_ref_stcb == 0) &&
5742 			    (freed_so_far >= rwnd_req)) {
5743 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5744 			}
5745 		}		/* end while(m) */
5746 		/*
5747 		 * At this point we have looked at it all and we either have
5748 		 * a MSG_EOR/or read all the user wants... <OR>
5749 		 * control->length == 0.
5750 		 */
5751 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5752 			/* we are done with this control */
5753 			if (control->length == 0) {
5754 				if (control->data) {
5755 #ifdef INVARIANTS
5756 					panic("control->data not null at read eor?");
5757 #else
5758 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5759 					sctp_m_freem(control->data);
5760 					control->data = NULL;
5761 #endif
5762 				}
5763 		done_with_control:
5764 				if (TAILQ_NEXT(control, next) == NULL) {
5765 					/*
5766 					 * If we don't have a next we need a
5767 					 * lock, if there is a next
5768 					 * interrupt is filling ahead of us
5769 					 * and we don't need a lock to
5770 					 * remove this guy (which is the
5771 					 * head of the queue).
5772 					 */
5773 					if (hold_rlock == 0) {
5774 						SCTP_INP_READ_LOCK(inp);
5775 						hold_rlock = 1;
5776 					}
5777 				}
5778 				TAILQ_REMOVE(&inp->read_queue, control, next);
5779 				/* Add back any hiddend data */
5780 				if (control->held_length) {
5781 					held_length = 0;
5782 					control->held_length = 0;
5783 					wakeup_read_socket = 1;
5784 				}
5785 				if (control->aux_data) {
5786 					sctp_m_free(control->aux_data);
5787 					control->aux_data = NULL;
5788 				}
5789 				no_rcv_needed = control->do_not_ref_stcb;
5790 				sctp_free_remote_addr(control->whoFrom);
5791 				control->data = NULL;
5792 				sctp_free_a_readq(stcb, control);
5793 				control = NULL;
5794 				if ((freed_so_far >= rwnd_req) &&
5795 				    (no_rcv_needed == 0))
5796 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5797 
5798 			} else {
5799 				/*
5800 				 * The user did not read all of this
5801 				 * message, turn off the returned MSG_EOR
5802 				 * since we are leaving more behind on the
5803 				 * control to read.
5804 				 */
5805 #ifdef INVARIANTS
5806 				if (control->end_added &&
5807 				    (control->data == NULL) &&
5808 				    (control->tail_mbuf == NULL)) {
5809 					panic("Gak, control->length is corrupt?");
5810 				}
5811 #endif
5812 				no_rcv_needed = control->do_not_ref_stcb;
5813 				out_flags &= ~MSG_EOR;
5814 			}
5815 		}
5816 		if (out_flags & MSG_EOR) {
5817 			goto release;
5818 		}
5819 		if ((uio->uio_resid == 0) ||
5820 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5821 		    ) {
5822 			goto release;
5823 		}
5824 		/*
5825 		 * If I hit here the receiver wants more and this message is
5826 		 * NOT done (pd-api). So two questions. Can we block? if not
5827 		 * we are done. Did the user NOT set MSG_WAITALL?
5828 		 */
5829 		if (block_allowed == 0) {
5830 			goto release;
5831 		}
5832 		/*
5833 		 * We need to wait for more data a few things: - We don't
5834 		 * sbunlock() so we don't get someone else reading. - We
5835 		 * must be sure to account for the case where what is added
5836 		 * is NOT to our control when we wakeup.
5837 		 */
5838 
5839 		/*
5840 		 * Do we need to tell the transport a rwnd update might be
5841 		 * needed before we go to sleep?
5842 		 */
5843 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5844 		    ((freed_so_far >= rwnd_req) &&
5845 		    (control->do_not_ref_stcb == 0) &&
5846 		    (no_rcv_needed == 0))) {
5847 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5848 		}
5849 wait_some_more:
5850 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5851 			goto release;
5852 		}
5853 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5854 			goto release;
5855 
5856 		if (hold_rlock == 1) {
5857 			SCTP_INP_READ_UNLOCK(inp);
5858 			hold_rlock = 0;
5859 		}
5860 		if (hold_sblock == 0) {
5861 			SOCKBUF_LOCK(&so->so_rcv);
5862 			hold_sblock = 1;
5863 		}
5864 		if ((copied_so_far) && (control->length == 0) &&
5865 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5866 			goto release;
5867 		}
5868 		if (so->so_rcv.sb_cc <= control->held_length) {
5869 			error = sbwait(&so->so_rcv);
5870 			if (error) {
5871 				goto release;
5872 			}
5873 			control->held_length = 0;
5874 		}
5875 		if (hold_sblock) {
5876 			SOCKBUF_UNLOCK(&so->so_rcv);
5877 			hold_sblock = 0;
5878 		}
5879 		if (control->length == 0) {
5880 			/* still nothing here */
5881 			if (control->end_added == 1) {
5882 				/* he aborted, or is done i.e.did a shutdown */
5883 				out_flags |= MSG_EOR;
5884 				if (control->pdapi_aborted) {
5885 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5886 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5887 
5888 					out_flags |= MSG_TRUNC;
5889 				} else {
5890 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5891 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5892 				}
5893 				goto done_with_control;
5894 			}
5895 			if (so->so_rcv.sb_cc > held_length) {
5896 				control->held_length = so->so_rcv.sb_cc;
5897 				held_length = 0;
5898 			}
5899 			goto wait_some_more;
5900 		} else if (control->data == NULL) {
5901 			/*
5902 			 * we must re-sync since data is probably being
5903 			 * added
5904 			 */
5905 			SCTP_INP_READ_LOCK(inp);
5906 			if ((control->length > 0) && (control->data == NULL)) {
5907 				/*
5908 				 * big trouble.. we have the lock and its
5909 				 * corrupt?
5910 				 */
5911 #ifdef INVARIANTS
5912 				panic("Impossible data==NULL length !=0");
5913 #endif
5914 				out_flags |= MSG_EOR;
5915 				out_flags |= MSG_TRUNC;
5916 				control->length = 0;
5917 				SCTP_INP_READ_UNLOCK(inp);
5918 				goto done_with_control;
5919 			}
5920 			SCTP_INP_READ_UNLOCK(inp);
5921 			/* We will fall around to get more data */
5922 		}
5923 		goto get_more_data;
5924 	} else {
5925 		/*-
5926 		 * Give caller back the mbuf chain,
5927 		 * store in uio_resid the length
5928 		 */
5929 		wakeup_read_socket = 0;
5930 		if ((control->end_added == 0) ||
5931 		    (TAILQ_NEXT(control, next) == NULL)) {
5932 			/* Need to get rlock */
5933 			if (hold_rlock == 0) {
5934 				SCTP_INP_READ_LOCK(inp);
5935 				hold_rlock = 1;
5936 			}
5937 		}
5938 		if (control->end_added) {
5939 			out_flags |= MSG_EOR;
5940 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5941 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5942 		}
5943 		if (control->spec_flags & M_NOTIFICATION) {
5944 			out_flags |= MSG_NOTIFICATION;
5945 		}
5946 		uio->uio_resid = control->length;
5947 		*mp = control->data;
5948 		m = control->data;
5949 		while (m) {
5950 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5951 				sctp_sblog(&so->so_rcv,
5952 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5953 			}
5954 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5955 			freed_so_far += SCTP_BUF_LEN(m);
5956 			freed_so_far += MSIZE;
5957 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5958 				sctp_sblog(&so->so_rcv,
5959 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5960 			}
5961 			m = SCTP_BUF_NEXT(m);
5962 		}
5963 		control->data = control->tail_mbuf = NULL;
5964 		control->length = 0;
5965 		if (out_flags & MSG_EOR) {
5966 			/* Done with this control */
5967 			goto done_with_control;
5968 		}
5969 	}
5970 release:
5971 	if (hold_rlock == 1) {
5972 		SCTP_INP_READ_UNLOCK(inp);
5973 		hold_rlock = 0;
5974 	}
5975 	if (hold_sblock == 1) {
5976 		SOCKBUF_UNLOCK(&so->so_rcv);
5977 		hold_sblock = 0;
5978 	}
5979 	sbunlock(&so->so_rcv);
5980 	sockbuf_lock = 0;
5981 
5982 release_unlocked:
5983 	if (hold_sblock) {
5984 		SOCKBUF_UNLOCK(&so->so_rcv);
5985 		hold_sblock = 0;
5986 	}
5987 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5988 		if ((freed_so_far >= rwnd_req) &&
5989 		    (control && (control->do_not_ref_stcb == 0)) &&
5990 		    (no_rcv_needed == 0))
5991 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5992 	}
5993 out:
5994 	if (msg_flags) {
5995 		*msg_flags = out_flags;
5996 	}
5997 	if (((out_flags & MSG_EOR) == 0) &&
5998 	    ((in_flags & MSG_PEEK) == 0) &&
5999 	    (sinfo) &&
6000 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
6001 		struct sctp_extrcvinfo *s_extra;
6002 
6003 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6004 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6005 	}
6006 	if (hold_rlock == 1) {
6007 		SCTP_INP_READ_UNLOCK(inp);
6008 		hold_rlock = 0;
6009 	}
6010 	if (hold_sblock) {
6011 		SOCKBUF_UNLOCK(&so->so_rcv);
6012 		hold_sblock = 0;
6013 	}
6014 	if (sockbuf_lock) {
6015 		sbunlock(&so->so_rcv);
6016 	}
6017 	if (freecnt_applied) {
6018 		/*
6019 		 * The lock on the socket buffer protects us so the free
6020 		 * code will stop. But since we used the socketbuf lock and
6021 		 * the sender uses the tcb_lock to increment, we need to use
6022 		 * the atomic add to the refcnt.
6023 		 */
6024 		if (stcb == NULL) {
6025 #ifdef INVARIANTS
6026 			panic("stcb for refcnt has gone NULL?");
6027 			goto stage_left;
6028 #else
6029 			goto stage_left;
6030 #endif
6031 		}
6032 		atomic_add_int(&stcb->asoc.refcnt, -1);
6033 		freecnt_applied = 0;
6034 		/* Save the value back for next time */
6035 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6036 	}
6037 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6038 		if (stcb) {
6039 			sctp_misc_ints(SCTP_SORECV_DONE,
6040 			    freed_so_far,
6041 			    ((uio) ? (slen - uio->uio_resid) : slen),
6042 			    stcb->asoc.my_rwnd,
6043 			    so->so_rcv.sb_cc);
6044 		} else {
6045 			sctp_misc_ints(SCTP_SORECV_DONE,
6046 			    freed_so_far,
6047 			    ((uio) ? (slen - uio->uio_resid) : slen),
6048 			    0,
6049 			    so->so_rcv.sb_cc);
6050 		}
6051 	}
6052 stage_left:
6053 	if (wakeup_read_socket) {
6054 		sctp_sorwakeup(inp, so);
6055 	}
6056 	return (error);
6057 }
6058 
6059 
6060 #ifdef SCTP_MBUF_LOGGING
6061 struct mbuf *
6062 sctp_m_free(struct mbuf *m)
6063 {
6064 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6065 		if (SCTP_BUF_IS_EXTENDED(m)) {
6066 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6067 		}
6068 	}
6069 	return (m_free(m));
6070 }
6071 
6072 void
6073 sctp_m_freem(struct mbuf *mb)
6074 {
6075 	while (mb != NULL)
6076 		mb = sctp_m_free(mb);
6077 }
6078 
6079 #endif
6080 
6081 int
6082 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6083 {
6084 	/*
6085 	 * Given a local address. For all associations that holds the
6086 	 * address, request a peer-set-primary.
6087 	 */
6088 	struct sctp_ifa *ifa;
6089 	struct sctp_laddr *wi;
6090 
6091 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6092 	if (ifa == NULL) {
6093 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6094 		return (EADDRNOTAVAIL);
6095 	}
6096 	/*
6097 	 * Now that we have the ifa we must awaken the iterator with this
6098 	 * message.
6099 	 */
6100 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6101 	if (wi == NULL) {
6102 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6103 		return (ENOMEM);
6104 	}
6105 	/* Now incr the count and int wi structure */
6106 	SCTP_INCR_LADDR_COUNT();
6107 	bzero(wi, sizeof(*wi));
6108 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6109 	wi->ifa = ifa;
6110 	wi->action = SCTP_SET_PRIM_ADDR;
6111 	atomic_add_int(&ifa->refcount, 1);
6112 
6113 	/* Now add it to the work queue */
6114 	SCTP_WQ_ADDR_LOCK();
6115 	/*
6116 	 * Should this really be a tailq? As it is we will process the
6117 	 * newest first :-0
6118 	 */
6119 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6120 	SCTP_WQ_ADDR_UNLOCK();
6121 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6122 	    (struct sctp_inpcb *)NULL,
6123 	    (struct sctp_tcb *)NULL,
6124 	    (struct sctp_nets *)NULL);
6125 	return (0);
6126 }
6127 
6128 
6129 int
6130 sctp_soreceive(struct socket *so,
6131     struct sockaddr **psa,
6132     struct uio *uio,
6133     struct mbuf **mp0,
6134     struct mbuf **controlp,
6135     int *flagsp)
6136 {
6137 	int error, fromlen;
6138 	uint8_t sockbuf[256];
6139 	struct sockaddr *from;
6140 	struct sctp_extrcvinfo sinfo;
6141 	int filling_sinfo = 1;
6142 	struct sctp_inpcb *inp;
6143 
6144 	inp = (struct sctp_inpcb *)so->so_pcb;
6145 	/* pickup the assoc we are reading from */
6146 	if (inp == NULL) {
6147 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6148 		return (EINVAL);
6149 	}
6150 	if ((sctp_is_feature_off(inp,
6151 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6152 	    (controlp == NULL)) {
6153 		/* user does not want the sndrcv ctl */
6154 		filling_sinfo = 0;
6155 	}
6156 	if (psa) {
6157 		from = (struct sockaddr *)sockbuf;
6158 		fromlen = sizeof(sockbuf);
6159 		from->sa_len = 0;
6160 	} else {
6161 		from = NULL;
6162 		fromlen = 0;
6163 	}
6164 
6165 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6166 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6167 	if ((controlp) && (filling_sinfo)) {
6168 		/* copy back the sinfo in a CMSG format */
6169 		if (filling_sinfo)
6170 			*controlp = sctp_build_ctl_nchunk(inp,
6171 			    (struct sctp_sndrcvinfo *)&sinfo);
6172 		else
6173 			*controlp = NULL;
6174 	}
6175 	if (psa) {
6176 		/* copy back the address info */
6177 		if (from && from->sa_len) {
6178 			*psa = sodupsockaddr(from, M_NOWAIT);
6179 		} else {
6180 			*psa = NULL;
6181 		}
6182 	}
6183 	return (error);
6184 }
6185 
6186 
6187 
6188 
6189 
6190 int
6191 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6192     int totaddr, int *error)
6193 {
6194 	int added = 0;
6195 	int i;
6196 	struct sctp_inpcb *inp;
6197 	struct sockaddr *sa;
6198 	size_t incr = 0;
6199 
6200 	sa = addr;
6201 	inp = stcb->sctp_ep;
6202 	*error = 0;
6203 	for (i = 0; i < totaddr; i++) {
6204 		switch (sa->sa_family) {
6205 #ifdef INET
6206 		case AF_INET:
6207 			incr = sizeof(struct sockaddr_in);
6208 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6209 				/* assoc gone no un-lock */
6210 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6211 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
6212 				*error = ENOBUFS;
6213 				goto out_now;
6214 			}
6215 			added++;
6216 			break;
6217 #endif
6218 #ifdef INET6
6219 		case AF_INET6:
6220 			incr = sizeof(struct sockaddr_in6);
6221 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6222 				/* assoc gone no un-lock */
6223 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6224 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
6225 				*error = ENOBUFS;
6226 				goto out_now;
6227 			}
6228 			added++;
6229 			break;
6230 #endif
6231 		default:
6232 			break;
6233 		}
6234 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6235 	}
6236 out_now:
6237 	return (added);
6238 }
6239 
6240 struct sctp_tcb *
6241 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6242     int *totaddr, int *num_v4, int *num_v6, int *error,
6243     int limit, int *bad_addr)
6244 {
6245 	struct sockaddr *sa;
6246 	struct sctp_tcb *stcb = NULL;
6247 	size_t incr, at, i;
6248 
6249 	at = incr = 0;
6250 	sa = addr;
6251 
6252 	*error = *num_v6 = *num_v4 = 0;
6253 	/* account and validate addresses */
6254 	for (i = 0; i < (size_t)*totaddr; i++) {
6255 		switch (sa->sa_family) {
6256 #ifdef INET
6257 		case AF_INET:
6258 			(*num_v4) += 1;
6259 			incr = sizeof(struct sockaddr_in);
6260 			if (sa->sa_len != incr) {
6261 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6262 				*error = EINVAL;
6263 				*bad_addr = 1;
6264 				return (NULL);
6265 			}
6266 			break;
6267 #endif
6268 #ifdef INET6
6269 		case AF_INET6:
6270 			{
6271 				struct sockaddr_in6 *sin6;
6272 
6273 				sin6 = (struct sockaddr_in6 *)sa;
6274 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6275 					/* Must be non-mapped for connectx */
6276 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6277 					*error = EINVAL;
6278 					*bad_addr = 1;
6279 					return (NULL);
6280 				}
6281 				(*num_v6) += 1;
6282 				incr = sizeof(struct sockaddr_in6);
6283 				if (sa->sa_len != incr) {
6284 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6285 					*error = EINVAL;
6286 					*bad_addr = 1;
6287 					return (NULL);
6288 				}
6289 				break;
6290 			}
6291 #endif
6292 		default:
6293 			*totaddr = i;
6294 			/* we are done */
6295 			break;
6296 		}
6297 		if (i == (size_t)*totaddr) {
6298 			break;
6299 		}
6300 		SCTP_INP_INCR_REF(inp);
6301 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6302 		if (stcb != NULL) {
6303 			/* Already have or am bring up an association */
6304 			return (stcb);
6305 		} else {
6306 			SCTP_INP_DECR_REF(inp);
6307 		}
6308 		if ((at + incr) > (size_t)limit) {
6309 			*totaddr = i;
6310 			break;
6311 		}
6312 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6313 	}
6314 	return ((struct sctp_tcb *)NULL);
6315 }
6316 
6317 /*
6318  * sctp_bindx(ADD) for one address.
6319  * assumes all arguments are valid/checked by caller.
6320  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * Add one address to a subset-bound endpoint.  Errors are
	 * reported through *error; success leaves *error untouched
	 * (caller initializes it).
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	/* scratch space: a v4-mapped v6 address is rewritten as plain v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* length must match a sockaddr_in6 exactly */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the mapped address into a real AF_INET one */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* endpoint is not yet bound at all: do a plain bind instead */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		/*
		 * NOTE(review): the sockaddr_in cast is also applied to v6
		 * addresses; this relies on sin_port and sin6_port sharing
		 * the same offset -- confirm against struct layouts.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* is some other endpoint already bound to this address? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* clear the port before handing off to addr mgmt */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6444 
6445 /*
6446  * sctp_bindx(DELETE) for one address.
6447  * assumes all arguments are valid/checked by caller.
6448  */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * Remove one address from a subset-bound endpoint.  Errors are
	 * reported through *error; success leaves *error untouched
	 * (caller initializes it).
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	/* scratch space: a v4-mapped v6 address is rewritten as plain v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* length must match a sockaddr_in6 exactly */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the mapped address into a real AF_INET one */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6529 
6530 /*
6531  * returns the valid local address count for an assoc, taking into account
6532  * all scoping rules
6533  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the local addresses usable by this association, applying
	 * the association's scoping rules (loopback, v4-private, v6
	 * link-local and site-local scopes) and skipping restricted
	 * addresses.  Returns the resulting count.
	 */
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/* v6 sockets may also carry v4 addresses unless V6ONLY is set */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6667 
6668 #if defined(SCTP_LOCAL_TRACE_BUF)
6669 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/*
	 * Record one entry in the circular trace buffer.  A slot is
	 * reserved lock-free: retry the compare-and-set until we manage
	 * to advance the shared index from the value we sampled.
	 */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			/* wrapping: entry 0 is ours, so 1 is the next free slot */
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* on wrap, the slot we write is entry 0 */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6695 
6696 #endif
6697 /* XXX: Remove the #ifdef after tunneling over IPv6 works also on FreeBSD. */
6698 #ifdef INET
6699 /* We will need to add support
6700  * to bind the ports and such here
6701  * so we can do UDP tunneling. In
6702  * the mean-time, we return error
6703  */
6704 #include <netinet/udp.h>
6705 #include <netinet/udp_var.h>
6706 #include <sys/proc.h>
6707 #ifdef INET6
6708 #include <netinet6/sctp6_var.h>
6709 #endif
6710 
6711 static void
6712 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6713 {
6714 	struct ip *iph;
6715 	struct mbuf *sp, *last;
6716 	struct udphdr *uhdr;
6717 	uint16_t port = 0;
6718 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6719 
6720 	/*
6721 	 * Split out the mbuf chain. Leave the IP header in m, place the
6722 	 * rest in the sp.
6723 	 */
6724 	if ((m->m_flags & M_PKTHDR) == 0) {
6725 		/* Can't handle one that is not a pkt hdr */
6726 		goto out;
6727 	}
6728 	/* pull the src port */
6729 	iph = mtod(m, struct ip *);
6730 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6731 
6732 	port = uhdr->uh_sport;
6733 	sp = m_split(m, off, M_DONTWAIT);
6734 	if (sp == NULL) {
6735 		/* Gak, drop packet, we can't do a split */
6736 		goto out;
6737 	}
6738 	if (sp->m_pkthdr.len < header_size) {
6739 		/* Gak, packet can't have an SCTP header in it - to small */
6740 		m_freem(sp);
6741 		goto out;
6742 	}
6743 	/* ok now pull up the UDP header and SCTP header together */
6744 	sp = m_pullup(sp, header_size);
6745 	if (sp == NULL) {
6746 		/* Gak pullup failed */
6747 		goto out;
6748 	}
6749 	/* trim out the UDP header */
6750 	m_adj(sp, sizeof(struct udphdr));
6751 
6752 	/* Now reconstruct the mbuf chain */
6753 	/* 1) find last one */
6754 	last = m;
6755 	while (last->m_next != NULL) {
6756 		last = last->m_next;
6757 	}
6758 	last->m_next = sp;
6759 	m->m_pkthdr.len += sp->m_pkthdr.len;
6760 	last = m;
6761 	while (last != NULL) {
6762 		last = last->m_next;
6763 	}
6764 	/* Now its ready for sctp_input or sctp6_input */
6765 	iph = mtod(m, struct ip *);
6766 	switch (iph->ip_v) {
6767 #ifdef INET
6768 	case IPVERSION:
6769 		{
6770 			uint16_t len;
6771 
6772 			/* its IPv4 */
6773 			len = SCTP_GET_IPV4_LENGTH(iph);
6774 			len -= sizeof(struct udphdr);
6775 			SCTP_GET_IPV4_LENGTH(iph) = len;
6776 			sctp_input_with_port(m, off, port);
6777 			break;
6778 		}
6779 #endif
6780 #ifdef INET6
6781 	case IPV6_VERSION >> 4:
6782 		{
6783 			/* its IPv6 - NOT supported */
6784 			goto out;
6785 			break;
6786 
6787 		}
6788 #endif
6789 	default:
6790 		{
6791 			m_freem(m);
6792 			break;
6793 		}
6794 	}
6795 	return;
6796 out:
6797 	m_freem(m);
6798 }
6799 
6800 void
6801 sctp_over_udp_stop(void)
6802 {
6803 	struct socket *sop;
6804 
6805 	/*
6806 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6807 	 * for writting!
6808 	 */
6809 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6810 		/* Nothing to do */
6811 		return;
6812 	}
6813 	sop = SCTP_BASE_INFO(udp_tun_socket);
6814 	soclose(sop);
6815 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6816 }
6817 
6818 int
6819 sctp_over_udp_start(void)
6820 {
6821 	uint16_t port;
6822 	int ret;
6823 	struct sockaddr_in sin;
6824 	struct socket *sop = NULL;
6825 	struct thread *th;
6826 	struct ucred *cred;
6827 
6828 	/*
6829 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6830 	 * for writting!
6831 	 */
6832 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6833 	if (port == 0) {
6834 		/* Must have a port set */
6835 		return (EINVAL);
6836 	}
6837 	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
6838 		/* Already running -- must stop first */
6839 		return (EALREADY);
6840 	}
6841 	th = curthread;
6842 	cred = th->td_ucred;
6843 	if ((ret = socreate(PF_INET, &sop,
6844 	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
6845 		return (ret);
6846 	}
6847 	SCTP_BASE_INFO(udp_tun_socket) = sop;
6848 	/* call the special UDP hook */
6849 	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
6850 	if (ret) {
6851 		goto exit_stage_left;
6852 	}
6853 	/* Ok we have a socket, bind it to the port */
6854 	memset(&sin, 0, sizeof(sin));
6855 	sin.sin_len = sizeof(sin);
6856 	sin.sin_family = AF_INET;
6857 	sin.sin_port = htons(port);
6858 	ret = sobind(sop, (struct sockaddr *)&sin, th);
6859 	if (ret) {
6860 		/* Close up we cant get the port */
6861 exit_stage_left:
6862 		sctp_over_udp_stop();
6863 		return (ret);
6864 	}
6865 	/*
6866 	 * Ok we should now get UDP packets directly to our input routine
6867 	 * sctp_recv_upd_tunneled_packet().
6868 	 */
6869 	return (0);
6870 }
6871 
6872 #endif
6873