xref: /freebsd/sys/netinet/sctputil.c (revision aa0a1e58f0189b0fde359a8bda032887e72057fa)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *   this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *   the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #endif
45 #include <netinet/sctp_header.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_uio.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_bsd_addr.h>
53 
54 
55 #ifndef KTR_SCTP
56 #define KTR_SCTP KTR_SUBSYS
57 #endif
58 
59 extern struct sctp_cc_functions sctp_cc_functions[];
60 extern struct sctp_ss_functions sctp_ss_functions[];
61 
62 void
63 sctp_sblog(struct sockbuf *sb,
64     struct sctp_tcb *stcb, int from, int incr)
65 {
66 	struct sctp_cwnd_log sctp_clog;
67 
68 	sctp_clog.x.sb.stcb = stcb;
69 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
70 	if (stcb)
71 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
72 	else
73 		sctp_clog.x.sb.stcb_sbcc = 0;
74 	sctp_clog.x.sb.incr = incr;
75 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
76 	    SCTP_LOG_EVENT_SB,
77 	    from,
78 	    sctp_clog.x.misc.log1,
79 	    sctp_clog.x.misc.log2,
80 	    sctp_clog.x.misc.log3,
81 	    sctp_clog.x.misc.log4);
82 }
83 
84 void
85 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
86 {
87 	struct sctp_cwnd_log sctp_clog;
88 
89 	sctp_clog.x.close.inp = (void *)inp;
90 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
91 	if (stcb) {
92 		sctp_clog.x.close.stcb = (void *)stcb;
93 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
94 	} else {
95 		sctp_clog.x.close.stcb = 0;
96 		sctp_clog.x.close.state = 0;
97 	}
98 	sctp_clog.x.close.loc = loc;
99 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
100 	    SCTP_LOG_EVENT_CLOSE,
101 	    0,
102 	    sctp_clog.x.misc.log1,
103 	    sctp_clog.x.misc.log2,
104 	    sctp_clog.x.misc.log3,
105 	    sctp_clog.x.misc.log4);
106 }
107 
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 
125 }
126 
127 void
128 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
129 {
130 	struct sctp_cwnd_log sctp_clog;
131 
132 	sctp_clog.x.strlog.stcb = stcb;
133 	sctp_clog.x.strlog.n_tsn = tsn;
134 	sctp_clog.x.strlog.n_sseq = sseq;
135 	sctp_clog.x.strlog.e_tsn = 0;
136 	sctp_clog.x.strlog.e_sseq = 0;
137 	sctp_clog.x.strlog.strm = stream;
138 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
139 	    SCTP_LOG_EVENT_STRM,
140 	    from,
141 	    sctp_clog.x.misc.log1,
142 	    sctp_clog.x.misc.log2,
143 	    sctp_clog.x.misc.log3,
144 	    sctp_clog.x.misc.log4);
145 
146 }
147 
148 void
149 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
150 {
151 	struct sctp_cwnd_log sctp_clog;
152 
153 	sctp_clog.x.nagle.stcb = (void *)stcb;
154 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
155 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
156 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
157 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
158 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
159 	    SCTP_LOG_EVENT_NAGLE,
160 	    action,
161 	    sctp_clog.x.misc.log1,
162 	    sctp_clog.x.misc.log2,
163 	    sctp_clog.x.misc.log3,
164 	    sctp_clog.x.misc.log4);
165 }
166 
167 
168 void
169 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
170 {
171 	struct sctp_cwnd_log sctp_clog;
172 
173 	sctp_clog.x.sack.cumack = cumack;
174 	sctp_clog.x.sack.oldcumack = old_cumack;
175 	sctp_clog.x.sack.tsn = tsn;
176 	sctp_clog.x.sack.numGaps = gaps;
177 	sctp_clog.x.sack.numDups = dups;
178 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
179 	    SCTP_LOG_EVENT_SACK,
180 	    from,
181 	    sctp_clog.x.misc.log1,
182 	    sctp_clog.x.misc.log2,
183 	    sctp_clog.x.misc.log3,
184 	    sctp_clog.x.misc.log4);
185 }
186 
187 void
188 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
189 {
190 	struct sctp_cwnd_log sctp_clog;
191 
192 	memset(&sctp_clog, 0, sizeof(sctp_clog));
193 	sctp_clog.x.map.base = map;
194 	sctp_clog.x.map.cum = cum;
195 	sctp_clog.x.map.high = high;
196 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
197 	    SCTP_LOG_EVENT_MAP,
198 	    from,
199 	    sctp_clog.x.misc.log1,
200 	    sctp_clog.x.misc.log2,
201 	    sctp_clog.x.misc.log3,
202 	    sctp_clog.x.misc.log4);
203 }
204 
205 void
206 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
207     int from)
208 {
209 	struct sctp_cwnd_log sctp_clog;
210 
211 	memset(&sctp_clog, 0, sizeof(sctp_clog));
212 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
213 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
214 	sctp_clog.x.fr.tsn = tsn;
215 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
216 	    SCTP_LOG_EVENT_FR,
217 	    from,
218 	    sctp_clog.x.misc.log1,
219 	    sctp_clog.x.misc.log2,
220 	    sctp_clog.x.misc.log3,
221 	    sctp_clog.x.misc.log4);
222 
223 }
224 
225 
226 void
227 sctp_log_mb(struct mbuf *m, int from)
228 {
229 	struct sctp_cwnd_log sctp_clog;
230 
231 	sctp_clog.x.mb.mp = m;
232 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
233 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
234 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
235 	if (SCTP_BUF_IS_EXTENDED(m)) {
236 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
237 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
238 	} else {
239 		sctp_clog.x.mb.ext = 0;
240 		sctp_clog.x.mb.refcnt = 0;
241 	}
242 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
243 	    SCTP_LOG_EVENT_MBUF,
244 	    from,
245 	    sctp_clog.x.misc.log1,
246 	    sctp_clog.x.misc.log2,
247 	    sctp_clog.x.misc.log3,
248 	    sctp_clog.x.misc.log4);
249 }
250 
251 
252 void
253 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
254     int from)
255 {
256 	struct sctp_cwnd_log sctp_clog;
257 
258 	if (control == NULL) {
259 		SCTP_PRINTF("Gak log of NULL?\n");
260 		return;
261 	}
262 	sctp_clog.x.strlog.stcb = control->stcb;
263 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
264 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
265 	sctp_clog.x.strlog.strm = control->sinfo_stream;
266 	if (poschk != NULL) {
267 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
268 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
269 	} else {
270 		sctp_clog.x.strlog.e_tsn = 0;
271 		sctp_clog.x.strlog.e_sseq = 0;
272 	}
273 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
274 	    SCTP_LOG_EVENT_STRM,
275 	    from,
276 	    sctp_clog.x.misc.log1,
277 	    sctp_clog.x.misc.log2,
278 	    sctp_clog.x.misc.log3,
279 	    sctp_clog.x.misc.log4);
280 
281 }
282 
283 void
284 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
285 {
286 	struct sctp_cwnd_log sctp_clog;
287 
288 	sctp_clog.x.cwnd.net = net;
289 	if (stcb->asoc.send_queue_cnt > 255)
290 		sctp_clog.x.cwnd.cnt_in_send = 255;
291 	else
292 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
293 	if (stcb->asoc.stream_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_str = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
297 
298 	if (net) {
299 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
300 		sctp_clog.x.cwnd.inflight = net->flight_size;
301 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
302 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
303 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
304 	}
305 	if (SCTP_CWNDLOG_PRESEND == from) {
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
307 	}
308 	sctp_clog.x.cwnd.cwnd_augment = augment;
309 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
310 	    SCTP_LOG_EVENT_CWND,
311 	    from,
312 	    sctp_clog.x.misc.log1,
313 	    sctp_clog.x.misc.log2,
314 	    sctp_clog.x.misc.log3,
315 	    sctp_clog.x.misc.log4);
316 
317 }
318 
/*
 * Snapshot the ownership state of every lock relevant to "inp"/"stcb"
 * (TCB, INP, create, global info and socket-buffer locks) into a single
 * KTR record.  Either pointer may be NULL, in which case the matching
 * fields are logged as SCTP_LOCK_UNKNOWN.  "from" tags the call site.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Global endpoint-info lock: writer ownership only. */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
		 * so_rcv.sb_mtx (SOCK_LOCK aliases the receive-buffer mutex
		 * on FreeBSD) -- confirm this duplication is intentional.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
363 
/*
 * Log a max-burst limit event.  "error" and "burst" are recorded in the
 * cwnd_new_value/cwnd_augment slots of the cwnd log record.
 * NOTE(review): both "stcb" and "net" are dereferenced unconditionally,
 * so callers must pass non-NULL pointers -- confirm against call sites.
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	/* Queue counts are clamped to fit the 8-bit log fields. */
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
391 
392 void
393 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
394 {
395 	struct sctp_cwnd_log sctp_clog;
396 
397 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
398 	sctp_clog.x.rwnd.send_size = snd_size;
399 	sctp_clog.x.rwnd.overhead = overhead;
400 	sctp_clog.x.rwnd.new_rwnd = 0;
401 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
402 	    SCTP_LOG_EVENT_RWND,
403 	    from,
404 	    sctp_clog.x.misc.log1,
405 	    sctp_clog.x.misc.log2,
406 	    sctp_clog.x.misc.log3,
407 	    sctp_clog.x.misc.log4);
408 }
409 
410 void
411 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
412 {
413 	struct sctp_cwnd_log sctp_clog;
414 
415 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
416 	sctp_clog.x.rwnd.send_size = flight_size;
417 	sctp_clog.x.rwnd.overhead = overhead;
418 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
419 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
420 	    SCTP_LOG_EVENT_RWND,
421 	    from,
422 	    sctp_clog.x.misc.log1,
423 	    sctp_clog.x.misc.log2,
424 	    sctp_clog.x.misc.log3,
425 	    sctp_clog.x.misc.log4);
426 }
427 
428 void
429 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
430 {
431 	struct sctp_cwnd_log sctp_clog;
432 
433 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
434 	sctp_clog.x.mbcnt.size_change = book;
435 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
436 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_EVENT_MBCNT,
439 	    from,
440 	    sctp_clog.x.misc.log1,
441 	    sctp_clog.x.misc.log2,
442 	    sctp_clog.x.misc.log3,
443 	    sctp_clog.x.misc.log4);
444 
445 }
446 
/*
 * Log four arbitrary caller-supplied 32-bit values as a generic
 * miscellaneous trace record; "from" tags the call site.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
455 
456 void
457 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
458 {
459 	struct sctp_cwnd_log sctp_clog;
460 
461 	sctp_clog.x.wake.stcb = (void *)stcb;
462 	sctp_clog.x.wake.wake_cnt = wake_cnt;
463 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
464 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
465 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
466 
467 	if (stcb->asoc.stream_queue_cnt < 0xff)
468 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
469 	else
470 		sctp_clog.x.wake.stream_qcnt = 0xff;
471 
472 	if (stcb->asoc.chunks_on_out_queue < 0xff)
473 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
474 	else
475 		sctp_clog.x.wake.chunks_on_oque = 0xff;
476 
477 	sctp_clog.x.wake.sctpflags = 0;
478 	/* set in the defered mode stuff */
479 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
480 		sctp_clog.x.wake.sctpflags |= 1;
481 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
482 		sctp_clog.x.wake.sctpflags |= 2;
483 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
484 		sctp_clog.x.wake.sctpflags |= 4;
485 	/* what about the sb */
486 	if (stcb->sctp_socket) {
487 		struct socket *so = stcb->sctp_socket;
488 
489 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
490 	} else {
491 		sctp_clog.x.wake.sbflags = 0xff;
492 	}
493 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
494 	    SCTP_LOG_EVENT_WAKE,
495 	    from,
496 	    sctp_clog.x.misc.log1,
497 	    sctp_clog.x.misc.log2,
498 	    sctp_clog.x.misc.log3,
499 	    sctp_clog.x.misc.log4);
500 
501 }
502 
503 void
504 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
505 {
506 	struct sctp_cwnd_log sctp_clog;
507 
508 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
509 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
510 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
511 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
512 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
513 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
514 	sctp_clog.x.blk.sndlen = sendlen;
515 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
516 	    SCTP_LOG_EVENT_BLOCK,
517 	    from,
518 	    sctp_clog.x.misc.log1,
519 	    sctp_clog.x.misc.log2,
520 	    sctp_clog.x.misc.log3,
521 	    sctp_clog.x.misc.log4);
522 
523 }
524 
/*
 * Stub for the SCTP_GET_STAT_LOG socket option: the KTR-based logging
 * above is retrieved via ktrdump(8) instead, so there is nothing to
 * copy out here.  Always succeeds; "optval"/"optsize" are untouched.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
531 
532 #ifdef SCTP_AUDITING_ENABLED
533 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
534 static int sctp_audit_indx = 0;
535 
536 static
537 void
538 sctp_print_audit_report(void)
539 {
540 	int i;
541 	int cnt;
542 
543 	cnt = 0;
544 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
545 		if ((sctp_audit_data[i][0] == 0xe0) &&
546 		    (sctp_audit_data[i][1] == 0x01)) {
547 			cnt = 0;
548 			SCTP_PRINTF("\n");
549 		} else if (sctp_audit_data[i][0] == 0xf0) {
550 			cnt = 0;
551 			SCTP_PRINTF("\n");
552 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
553 		    (sctp_audit_data[i][1] == 0x01)) {
554 			SCTP_PRINTF("\n");
555 			cnt = 0;
556 		}
557 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
558 		    (uint32_t) sctp_audit_data[i][1]);
559 		cnt++;
560 		if ((cnt % 14) == 0)
561 			SCTP_PRINTF("\n");
562 	}
563 	for (i = 0; i < sctp_audit_indx; i++) {
564 		if ((sctp_audit_data[i][0] == 0xe0) &&
565 		    (sctp_audit_data[i][1] == 0x01)) {
566 			cnt = 0;
567 			SCTP_PRINTF("\n");
568 		} else if (sctp_audit_data[i][0] == 0xf0) {
569 			cnt = 0;
570 			SCTP_PRINTF("\n");
571 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
572 		    (sctp_audit_data[i][1] == 0x01)) {
573 			SCTP_PRINTF("\n");
574 			cnt = 0;
575 		}
576 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
577 		    (uint32_t) sctp_audit_data[i][1]);
578 		cnt++;
579 		if ((cnt % 14) == 0)
580 			SCTP_PRINTF("\n");
581 	}
582 	SCTP_PRINTF("\n");
583 }
584 
/*
 * Consistency audit of an association's retransmission and flight-size
 * bookkeeping.  Records trace markers in the audit ring and, on any
 * mismatch, repairs the stored counter from a fresh recount and dumps
 * the audit report.  "from" tags the call site.  "inp"/"stcb" may be
 * NULL (only an error marker is recorded); "net" is accepted but not
 * used here.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA marks an audit pass; low byte of "from" identifies caller. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: no endpoint supplied. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: no association supplied. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: record the current retransmit count (low byte only). */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Recount from the sent queue: chunks marked for resend, and
	 * bytes/chunks still counted as in flight (sent < RESEND).
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit-count mismatch; repair and log 0xA2. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: flight-bytes mismatch; repair. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: chunks-in-flight count mismatch; repair. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: per-destination flight sizes must sum to the total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net sum mismatch; rebuild each net's count. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
714 
715 void
716 sctp_audit_log(uint8_t ev, uint8_t fd)
717 {
718 
719 	sctp_audit_data[sctp_audit_indx][0] = ev;
720 	sctp_audit_data[sctp_audit_indx][1] = fd;
721 	sctp_audit_indx++;
722 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
723 		sctp_audit_indx = 0;
724 	}
725 }
726 
727 #endif
728 
729 /*
730  * sctp_stop_timers_for_shutdown() should be called
731  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
732  * state to make sure that all timers are stopped.
733  */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	/* Stop the association-wide timers first ... */
	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	/* ... then the per-destination FR and path-MTU timers. */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
	}
}
753 
754 /*
755  * a list of sizes based on typical mtu's, used only if next hop size not
756  * returned.
757  */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest table MTU strictly smaller than "val".  When "val"
 * does not exceed the smallest table entry, "val" itself is returned.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t idx;
	const uint32_t nitems = sizeof(sctp_mtu_sizes) / sizeof(sctp_mtu_sizes[0]);

	if (val <= sctp_mtu_sizes[0])
		return (val);
	/* Advance to the first entry >= val (or one past the table). */
	idx = 1;
	while (idx < nitems && sctp_mtu_sizes[idx] < val)
		idx++;
	return (sctp_mtu_sizes[idx - 1]);
}
798 
799 /*
800  * Return the smallest MTU larger than val. If there is no
801  * entry, just return val.
802  */
803 uint32_t
804 sctp_get_next_mtu(struct sctp_inpcb *inp, uint32_t val)
805 {
806 	/* select another MTU that is just bigger than this one */
807 	uint32_t i;
808 
809 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
810 		if (val < sctp_mtu_sizes[i]) {
811 			return (sctp_mtu_sizes[i]);
812 		}
813 	}
814 	return (val);
815 }
816 
/*
 * Refill the endpoint's random-number store by hashing the endpoint's
 * random seed together with a monotonically increasing counter, then
 * reset the consumption index so sctp_select_initial_TSN() starts
 * handing out the fresh bytes.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
835 
836 uint32_t
837 sctp_select_initial_TSN(struct sctp_pcb *inp)
838 {
839 	/*
840 	 * A true implementation should use random selection process to get
841 	 * the initial stream sequence number, using RFC1750 as a good
842 	 * guideline
843 	 */
844 	uint32_t x, *xp;
845 	uint8_t *p;
846 	int store_at, new_store;
847 
848 	if (inp->initial_sequence_debug != 0) {
849 		uint32_t ret;
850 
851 		ret = inp->initial_sequence_debug;
852 		inp->initial_sequence_debug++;
853 		return (ret);
854 	}
855 retry:
856 	store_at = inp->store_at;
857 	new_store = store_at + sizeof(uint32_t);
858 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
859 		new_store = 0;
860 	}
861 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
862 		goto retry;
863 	}
864 	if (new_store == 0) {
865 		/* Refill the random store */
866 		sctp_fill_random_store(inp);
867 	}
868 	p = &inp->random_store[store_at];
869 	xp = (uint32_t *) p;
870 	x = *xp;
871 	return (x);
872 }
873 
874 uint32_t
875 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
876 {
877 	uint32_t x, not_done;
878 	struct timeval now;
879 
880 	(void)SCTP_GETTIME_TIMEVAL(&now);
881 	not_done = 1;
882 	while (not_done) {
883 		x = sctp_select_initial_TSN(&inp->sctp_ep);
884 		if (x == 0) {
885 			/* we never use 0 */
886 			continue;
887 		}
888 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
889 			not_done = 0;
890 		}
891 	}
892 	return (x);
893 }
894 
895 int
896 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
897     uint32_t override_tag, uint32_t vrf_id)
898 {
899 	struct sctp_association *asoc;
900 
901 	/*
902 	 * Anything set to zero is taken care of by the allocation routine's
903 	 * bzero
904 	 */
905 
906 	/*
907 	 * Up front select what scoping to apply on addresses I tell my peer
908 	 * Not sure what to do with these right now, we will need to come up
909 	 * with a way to set them. We may need to pass them through from the
910 	 * caller in the sctp_aloc_assoc() function.
911 	 */
912 	int i;
913 
914 	asoc = &stcb->asoc;
915 	/* init all variables to a known value. */
916 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
917 	asoc->max_burst = m->sctp_ep.max_burst;
918 	asoc->fr_max_burst = m->sctp_ep.fr_max_burst;
919 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
920 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
921 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
922 	asoc->ecn_allowed = m->sctp_ecn_enable;
923 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
924 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
925 	asoc->sctp_frag_point = m->sctp_frag_point;
926 #ifdef INET
927 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
928 #else
929 	asoc->default_tos = 0;
930 #endif
931 
932 #ifdef INET6
933 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
934 #else
935 	asoc->default_flowlabel = 0;
936 #endif
937 	asoc->sb_send_resv = 0;
938 	if (override_tag) {
939 		asoc->my_vtag = override_tag;
940 	} else {
941 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
942 	}
943 	/* Get the nonce tags */
944 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
945 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
946 	asoc->vrf_id = vrf_id;
947 
948 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
949 		asoc->hb_is_disabled = 1;
950 	else
951 		asoc->hb_is_disabled = 0;
952 
953 #ifdef SCTP_ASOCLOG_OF_TSNS
954 	asoc->tsn_in_at = 0;
955 	asoc->tsn_out_at = 0;
956 	asoc->tsn_in_wrapped = 0;
957 	asoc->tsn_out_wrapped = 0;
958 	asoc->cumack_log_at = 0;
959 	asoc->cumack_log_atsnt = 0;
960 #endif
961 #ifdef SCTP_FS_SPEC_LOG
962 	asoc->fs_index = 0;
963 #endif
964 	asoc->refcnt = 0;
965 	asoc->assoc_up_sent = 0;
966 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
967 	    sctp_select_initial_TSN(&m->sctp_ep);
968 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
969 	/* we are optimisitic here */
970 	asoc->peer_supports_pktdrop = 1;
971 	asoc->peer_supports_nat = 0;
972 	asoc->sent_queue_retran_cnt = 0;
973 
974 	/* for CMT */
975 	asoc->last_net_cmt_send_started = NULL;
976 
977 	/* This will need to be adjusted */
978 	asoc->last_acked_seq = asoc->init_seq_number - 1;
979 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
980 	asoc->asconf_seq_in = asoc->last_acked_seq;
981 
982 	/* here we are different, we hold the next one we expect */
983 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
984 
985 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
986 	asoc->initial_rto = m->sctp_ep.initial_rto;
987 
988 	asoc->max_init_times = m->sctp_ep.max_init_times;
989 	asoc->max_send_times = m->sctp_ep.max_send_times;
990 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
991 	asoc->free_chunk_cnt = 0;
992 
993 	asoc->iam_blocking = 0;
994 
995 	asoc->context = m->sctp_context;
996 	asoc->def_send = m->def_send;
997 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
998 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
999 	asoc->pr_sctp_cnt = 0;
1000 	asoc->total_output_queue_size = 0;
1001 
1002 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1003 		struct in6pcb *inp6;
1004 
1005 		/* Its a V6 socket */
1006 		inp6 = (struct in6pcb *)m;
1007 		asoc->ipv6_addr_legal = 1;
1008 		/* Now look at the binding flag to see if V4 will be legal */
1009 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1010 			asoc->ipv4_addr_legal = 1;
1011 		} else {
1012 			/* V4 addresses are NOT legal on the association */
1013 			asoc->ipv4_addr_legal = 0;
1014 		}
1015 	} else {
1016 		/* Its a V4 socket, no - V6 */
1017 		asoc->ipv4_addr_legal = 1;
1018 		asoc->ipv6_addr_legal = 0;
1019 	}
1020 
1021 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1022 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1023 
1024 	asoc->smallest_mtu = m->sctp_frag_point;
1025 	asoc->minrto = m->sctp_ep.sctp_minrto;
1026 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1027 
1028 	asoc->locked_on_sending = NULL;
1029 	asoc->stream_locked_on = 0;
1030 	asoc->ecn_echo_cnt_onq = 0;
1031 	asoc->stream_locked = 0;
1032 
1033 	asoc->send_sack = 1;
1034 
1035 	LIST_INIT(&asoc->sctp_restricted_addrs);
1036 
1037 	TAILQ_INIT(&asoc->nets);
1038 	TAILQ_INIT(&asoc->pending_reply_queue);
1039 	TAILQ_INIT(&asoc->asconf_ack_sent);
1040 	/* Setup to fill the hb random cache at first HB */
1041 	asoc->hb_random_idx = 4;
1042 
1043 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1044 
1045 	stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
1046 	stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
1047 
1048 	stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
1049 	stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
1050 
1051 	/*
1052 	 * Now the stream parameters, here we allocate space for all streams
1053 	 * that we request by default.
1054 	 */
1055 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1056 	    m->sctp_ep.pre_open_stream_count;
1057 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1058 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1059 	    SCTP_M_STRMO);
1060 	if (asoc->strmout == NULL) {
1061 		/* big trouble no memory */
1062 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1063 		return (ENOMEM);
1064 	}
1065 	for (i = 0; i < asoc->streamoutcnt; i++) {
1066 		/*
1067 		 * inbound side must be set to 0xffff, also NOTE when we get
1068 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1069 		 * count (streamoutcnt) but first check if we sent to any of
1070 		 * the upper streams that were dropped (if some were). Those
1071 		 * that were dropped must be notified to the upper layer as
1072 		 * failed to send.
1073 		 */
1074 		asoc->strmout[i].next_sequence_sent = 0x0;
1075 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1076 		asoc->strmout[i].stream_no = i;
1077 		asoc->strmout[i].last_msg_incomplete = 0;
1078 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1079 	}
1080 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1081 
1082 	/* Now the mapping array */
1083 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1084 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1085 	    SCTP_M_MAP);
1086 	if (asoc->mapping_array == NULL) {
1087 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1088 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1089 		return (ENOMEM);
1090 	}
1091 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1092 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1093 	    SCTP_M_MAP);
1094 	if (asoc->nr_mapping_array == NULL) {
1095 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1096 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1097 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1098 		return (ENOMEM);
1099 	}
1100 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1101 
1102 	/* Now the init of the other outqueues */
1103 	TAILQ_INIT(&asoc->free_chunks);
1104 	TAILQ_INIT(&asoc->control_send_queue);
1105 	TAILQ_INIT(&asoc->asconf_send_queue);
1106 	TAILQ_INIT(&asoc->send_queue);
1107 	TAILQ_INIT(&asoc->sent_queue);
1108 	TAILQ_INIT(&asoc->reasmqueue);
1109 	TAILQ_INIT(&asoc->resetHead);
1110 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1111 	TAILQ_INIT(&asoc->asconf_queue);
1112 	/* authentication fields */
1113 	asoc->authinfo.random = NULL;
1114 	asoc->authinfo.active_keyid = 0;
1115 	asoc->authinfo.assoc_key = NULL;
1116 	asoc->authinfo.assoc_keyid = 0;
1117 	asoc->authinfo.recv_key = NULL;
1118 	asoc->authinfo.recv_keyid = 0;
1119 	LIST_INIT(&asoc->shared_keys);
1120 	asoc->marked_retrans = 0;
1121 	asoc->timoinit = 0;
1122 	asoc->timodata = 0;
1123 	asoc->timosack = 0;
1124 	asoc->timoshutdown = 0;
1125 	asoc->timoheartbeat = 0;
1126 	asoc->timocookie = 0;
1127 	asoc->timoshutdownack = 0;
1128 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1129 	asoc->discontinuity_time = asoc->start_time;
1130 	/*
1131 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1132 	 * freed later when the association is freed.
1133 	 */
1134 	return (0);
1135 }
1136 
1137 void
1138 sctp_print_mapping_array(struct sctp_association *asoc)
1139 {
1140 	unsigned int i, limit;
1141 
1142 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1143 	    asoc->mapping_array_size,
1144 	    asoc->mapping_array_base_tsn,
1145 	    asoc->cumulative_tsn,
1146 	    asoc->highest_tsn_inside_map,
1147 	    asoc->highest_tsn_inside_nr_map);
1148 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1149 		if (asoc->mapping_array[limit - 1]) {
1150 			break;
1151 		}
1152 	}
1153 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1154 	for (i = 0; i < limit; i++) {
1155 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1156 	}
1157 	if (limit % 16)
1158 		printf("\n");
1159 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1160 		if (asoc->nr_mapping_array[limit - 1]) {
1161 			break;
1162 		}
1163 	}
1164 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1165 	for (i = 0; i < limit; i++) {
1166 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1167 	}
1168 	if (limit % 16)
1169 		printf("\n");
1170 }
1171 
1172 int
1173 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1174 {
1175 	/* mapping array needs to grow */
1176 	uint8_t *new_array1, *new_array2;
1177 	uint32_t new_size;
1178 
1179 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1180 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1181 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1182 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1183 		/* can't get more, forget it */
1184 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1185 		if (new_array1) {
1186 			SCTP_FREE(new_array1, SCTP_M_MAP);
1187 		}
1188 		if (new_array2) {
1189 			SCTP_FREE(new_array2, SCTP_M_MAP);
1190 		}
1191 		return (-1);
1192 	}
1193 	memset(new_array1, 0, new_size);
1194 	memset(new_array2, 0, new_size);
1195 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1196 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1197 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1198 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1199 	asoc->mapping_array = new_array1;
1200 	asoc->nr_mapping_array = new_array2;
1201 	asoc->mapping_array_size = new_size;
1202 	return (0);
1203 }
1204 
1205 
/*
 * Core of the association iterator: walk the endpoint (inp) list starting
 * at it->inp, and for each endpoint whose pcb flags/features match the
 * iterator's filter, run the per-endpoint callback (function_inp), then the
 * per-association callback (function_assoc) on each TCB in the requested
 * state, and finally function_inp_end.  When the walk completes, the
 * end-callback (function_atend) runs and the iterator structure itself is
 * freed — callers must not touch 'it' after this returns.
 *
 * Locking: entered and left holding no locks; internally holds the
 * INP-INFO read lock and the ITERATOR lock, taking per-inp and per-tcb
 * locks as it goes.  Every SCTP_ITERATOR_MAX_AT_ONCE associations it
 * drops and re-acquires the big locks to let other threads in.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* Drop the reference the scheduler took; we hold the lock now. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* The iterator owns itself; it is freed here, not by the caller. */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* it->inp is already read-locked from the entry path above. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* Advance while still holding the old inp's lock, then release it. */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the current tcb and inp by refcount, release
			 * every lock (inner-to-outer), then re-acquire.
			 * While the locks were dropped another thread may
			 * have posted control flags telling us to stop.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				printf("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Resume: re-lock, then drop the pinning references. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1357 
/*
 * Drain the queue of pending association iterators: pop each one off the
 * work queue and run it via sctp_iterator_work() in its owning vnet.
 * sctp_iterator_work() frees the iterator when it completes, so 'it' must
 * not be referenced after the call (the FOREACH_SAFE next pointer was
 * saved beforehand).  The WQ lock is dropped around each iterator run so
 * new work can be queued, and is held again on return.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		/* 'it' has been freed by sctp_iterator_work() at this point. */
		sctp_it_ctl.cur_it = NULL;
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
			/* Shutdown requested while the lock was dropped. */
			break;
		}
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1384 
1385 
1386 static void
1387 sctp_handle_addr_wq(void)
1388 {
1389 	/* deal with the ADDR wq from the rtsock calls */
1390 	struct sctp_laddr *wi, *nwi;
1391 	struct sctp_asconf_iterator *asc;
1392 
1393 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1394 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1395 	if (asc == NULL) {
1396 		/* Try later, no memory */
1397 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1398 		    (struct sctp_inpcb *)NULL,
1399 		    (struct sctp_tcb *)NULL,
1400 		    (struct sctp_nets *)NULL);
1401 		return;
1402 	}
1403 	LIST_INIT(&asc->list_of_work);
1404 	asc->cnt = 0;
1405 
1406 	SCTP_WQ_ADDR_LOCK();
1407 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1408 		LIST_REMOVE(wi, sctp_nxt_addr);
1409 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1410 		asc->cnt++;
1411 	}
1412 	SCTP_WQ_ADDR_UNLOCK();
1413 
1414 	if (asc->cnt == 0) {
1415 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1416 	} else {
1417 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1418 		    sctp_asconf_iterator_stcb,
1419 		    NULL,	/* No ep end for boundall */
1420 		    SCTP_PCB_FLAGS_BOUNDALL,
1421 		    SCTP_PCB_ANY_FEATURES,
1422 		    SCTP_ASOC_ANY_STATE,
1423 		    (void *)asc, 0,
1424 		    sctp_asconf_iterator_end, NULL, 0);
1425 	}
1426 }
1427 
/*
 * NOTE(review): file-scope, non-static scratch globals written by
 * sctp_timeout_handler() on every T3 (data) timeout.  Concurrent timers
 * on different CPUs race on these; they appear to be debug leftovers and
 * could likely be function locals — TODO confirm nothing outside this
 * file references them before changing their linkage.
 */
int retcode = 0;
int cur_oerr = 0;
1430 
/*
 * Common callout entry point for every SCTP timer.  't' is the
 * struct sctp_timer embedded in the owning object; its ep/tcb/net
 * back-pointers identify the endpoint, association, and destination the
 * timer belongs to (any of which may be NULL depending on the type).
 *
 * The long preamble validates the timer (self pointer, type range),
 * takes a reference on the inp and a refcount on the tcb so neither can
 * be freed underneath us, locks the tcb, and bails out if the timer was
 * stopped or rescheduled in the meantime.  The 'stopped_from' field is a
 * breadcrumb (0xa001..0xa006, then the type) recording how far servicing
 * got, for debugging.  The switch then dispatches to the per-type
 * handler; handlers that destroy the tcb/inp jump past the unlock/decref
 * epilogue via out_decr/out_no_decr.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Only the ADDR_WQ timer may legitimately run without an endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/*
	 * Cache the type for the exit-path debug log — presumably because
	 * tmr may be freed by handlers that destroy the inp/tcb (TODO
	 * confirm).
	 */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * If the socket is already gone, only the timer types that
		 * must still run during endpoint/association teardown are
		 * serviced; everything else is dropped here.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the tcb; asoc.state == 0 means it is being torn down. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	/* Timer was stopped after firing but before we got here: undo refs. */
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/*
		 * Take the tcb lock, then trade the temporary refcount for
		 * it.  Re-check the state under the lock; only ASOCKILL may
		 * proceed on an assoc that is about to be freed.
		 */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timeout for a destination. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		/*
		 * NOTE(review): cur_oerr/retcode are file globals; cur_oerr
		 * is stored here but not read again in this function.
		 */
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed-SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* Count reachable-but-unconfirmed destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Destroys the association; must skip the normal epilogue. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		/* Destroys the endpoint; must skip the normal epilogue. */
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
	/* NOTE(review): stray ";" after the switch above (harmless empty statement). */
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1895 
1896 void
1897 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1898     struct sctp_nets *net)
1899 {
1900 	int to_ticks;
1901 	struct sctp_timer *tmr;
1902 
1903 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1904 		return;
1905 
1906 	to_ticks = 0;
1907 
1908 	tmr = NULL;
1909 	if (stcb) {
1910 		SCTP_TCB_LOCK_ASSERT(stcb);
1911 	}
1912 	switch (t_type) {
1913 	case SCTP_TIMER_TYPE_ZERO_COPY:
1914 		tmr = &inp->sctp_ep.zero_copy_timer;
1915 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1916 		break;
1917 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1918 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1919 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1920 		break;
1921 	case SCTP_TIMER_TYPE_ADDR_WQ:
1922 		/* Only 1 tick away :-) */
1923 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1924 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1925 		break;
1926 	case SCTP_TIMER_TYPE_SEND:
1927 		/* Here we use the RTO timer */
1928 		{
1929 			int rto_val;
1930 
1931 			if ((stcb == NULL) || (net == NULL)) {
1932 				return;
1933 			}
1934 			tmr = &net->rxt_timer;
1935 			if (net->RTO == 0) {
1936 				rto_val = stcb->asoc.initial_rto;
1937 			} else {
1938 				rto_val = net->RTO;
1939 			}
1940 			to_ticks = MSEC_TO_TICKS(rto_val);
1941 		}
1942 		break;
1943 	case SCTP_TIMER_TYPE_INIT:
1944 		/*
1945 		 * Here we use the INIT timer default usually about 1
1946 		 * minute.
1947 		 */
1948 		if ((stcb == NULL) || (net == NULL)) {
1949 			return;
1950 		}
1951 		tmr = &net->rxt_timer;
1952 		if (net->RTO == 0) {
1953 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1954 		} else {
1955 			to_ticks = MSEC_TO_TICKS(net->RTO);
1956 		}
1957 		break;
1958 	case SCTP_TIMER_TYPE_RECV:
1959 		/*
1960 		 * Here we use the Delayed-Ack timer value from the inp
1961 		 * ususually about 200ms.
1962 		 */
1963 		if (stcb == NULL) {
1964 			return;
1965 		}
1966 		tmr = &stcb->asoc.dack_timer;
1967 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1968 		break;
1969 	case SCTP_TIMER_TYPE_SHUTDOWN:
1970 		/* Here we use the RTO of the destination. */
1971 		if ((stcb == NULL) || (net == NULL)) {
1972 			return;
1973 		}
1974 		if (net->RTO == 0) {
1975 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1976 		} else {
1977 			to_ticks = MSEC_TO_TICKS(net->RTO);
1978 		}
1979 		tmr = &net->rxt_timer;
1980 		break;
1981 	case SCTP_TIMER_TYPE_HEARTBEAT:
1982 		/*
1983 		 * the net is used here so that we can add in the RTO. Even
1984 		 * though we use a different timer. We also add the HB timer
1985 		 * PLUS a random jitter.
1986 		 */
1987 		if ((inp == NULL) || (stcb == NULL)) {
1988 			return;
1989 		} else {
1990 			uint32_t rndval;
1991 			uint8_t this_random;
1992 			int cnt_of_unconf = 0;
1993 			struct sctp_nets *lnet;
1994 
1995 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1996 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1997 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1998 					cnt_of_unconf++;
1999 				}
2000 			}
2001 			if (cnt_of_unconf) {
2002 				net = lnet = NULL;
2003 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
2004 			}
2005 			if (stcb->asoc.hb_random_idx > 3) {
2006 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2007 				memcpy(stcb->asoc.hb_random_values, &rndval,
2008 				    sizeof(stcb->asoc.hb_random_values));
2009 				stcb->asoc.hb_random_idx = 0;
2010 			}
2011 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
2012 			stcb->asoc.hb_random_idx++;
2013 			stcb->asoc.hb_ect_randombit = 0;
2014 			/*
2015 			 * this_random will be 0 - 256 ms RTO is in ms.
2016 			 */
2017 			if ((stcb->asoc.hb_is_disabled) &&
2018 			    (cnt_of_unconf == 0)) {
2019 				return;
2020 			}
2021 			if (net) {
2022 				int delay;
2023 
2024 				delay = stcb->asoc.heart_beat_delay;
2025 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2026 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2027 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2028 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2029 						delay = 0;
2030 					}
2031 				}
2032 				if (net->RTO == 0) {
2033 					/* Never been checked */
2034 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2035 				} else {
2036 					/* set rto_val to the ms */
2037 					to_ticks = delay + net->RTO + this_random;
2038 				}
2039 			} else {
2040 				if (cnt_of_unconf) {
2041 					to_ticks = this_random + stcb->asoc.initial_rto;
2042 				} else {
2043 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2044 				}
2045 			}
2046 			/*
2047 			 * Now we must convert the to_ticks that are now in
2048 			 * ms to ticks.
2049 			 */
2050 			to_ticks = MSEC_TO_TICKS(to_ticks);
2051 			tmr = &stcb->asoc.hb_timer;
2052 		}
2053 		break;
2054 	case SCTP_TIMER_TYPE_COOKIE:
2055 		/*
2056 		 * Here we can use the RTO timer from the network since one
2057 		 * RTT was compelete. If a retran happened then we will be
2058 		 * using the RTO initial value.
2059 		 */
2060 		if ((stcb == NULL) || (net == NULL)) {
2061 			return;
2062 		}
2063 		if (net->RTO == 0) {
2064 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2065 		} else {
2066 			to_ticks = MSEC_TO_TICKS(net->RTO);
2067 		}
2068 		tmr = &net->rxt_timer;
2069 		break;
2070 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2071 		/*
2072 		 * nothing needed but the endpoint here ususually about 60
2073 		 * minutes.
2074 		 */
2075 		if (inp == NULL) {
2076 			return;
2077 		}
2078 		tmr = &inp->sctp_ep.signature_change;
2079 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2080 		break;
2081 	case SCTP_TIMER_TYPE_ASOCKILL:
2082 		if (stcb == NULL) {
2083 			return;
2084 		}
2085 		tmr = &stcb->asoc.strreset_timer;
2086 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2087 		break;
2088 	case SCTP_TIMER_TYPE_INPKILL:
2089 		/*
2090 		 * The inp is setup to die. We re-use the signature_chage
2091 		 * timer since that has stopped and we are in the GONE
2092 		 * state.
2093 		 */
2094 		if (inp == NULL) {
2095 			return;
2096 		}
2097 		tmr = &inp->sctp_ep.signature_change;
2098 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2099 		break;
2100 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2101 		/*
2102 		 * Here we use the value found in the EP for PMTU ususually
2103 		 * about 10 minutes.
2104 		 */
2105 		if ((stcb == NULL) || (inp == NULL)) {
2106 			return;
2107 		}
2108 		if (net == NULL) {
2109 			return;
2110 		}
2111 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2112 		tmr = &net->pmtu_timer;
2113 		break;
2114 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2115 		/* Here we use the RTO of the destination */
2116 		if ((stcb == NULL) || (net == NULL)) {
2117 			return;
2118 		}
2119 		if (net->RTO == 0) {
2120 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2121 		} else {
2122 			to_ticks = MSEC_TO_TICKS(net->RTO);
2123 		}
2124 		tmr = &net->rxt_timer;
2125 		break;
2126 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2127 		/*
2128 		 * Here we use the endpoints shutdown guard timer usually
2129 		 * about 3 minutes.
2130 		 */
2131 		if ((inp == NULL) || (stcb == NULL)) {
2132 			return;
2133 		}
2134 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2135 		tmr = &stcb->asoc.shut_guard_timer;
2136 		break;
2137 	case SCTP_TIMER_TYPE_STRRESET:
2138 		/*
2139 		 * Here the timer comes from the stcb but its value is from
2140 		 * the net's RTO.
2141 		 */
2142 		if ((stcb == NULL) || (net == NULL)) {
2143 			return;
2144 		}
2145 		if (net->RTO == 0) {
2146 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2147 		} else {
2148 			to_ticks = MSEC_TO_TICKS(net->RTO);
2149 		}
2150 		tmr = &stcb->asoc.strreset_timer;
2151 		break;
2152 
2153 	case SCTP_TIMER_TYPE_EARLYFR:
2154 		{
2155 			unsigned int msec;
2156 
2157 			if ((stcb == NULL) || (net == NULL)) {
2158 				return;
2159 			}
2160 			if (net->flight_size > net->cwnd) {
2161 				/* no need to start */
2162 				return;
2163 			}
2164 			SCTP_STAT_INCR(sctps_earlyfrstart);
2165 			if (net->lastsa == 0) {
2166 				/* Hmm no rtt estimate yet? */
2167 				msec = stcb->asoc.initial_rto >> 2;
2168 			} else {
2169 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2170 			}
2171 			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
2172 				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
2173 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2174 					msec = SCTP_MINFR_MSEC_FLOOR;
2175 				}
2176 			}
2177 			to_ticks = MSEC_TO_TICKS(msec);
2178 			tmr = &net->fr_timer;
2179 		}
2180 		break;
2181 	case SCTP_TIMER_TYPE_ASCONF:
2182 		/*
2183 		 * Here the timer comes from the stcb but its value is from
2184 		 * the net's RTO.
2185 		 */
2186 		if ((stcb == NULL) || (net == NULL)) {
2187 			return;
2188 		}
2189 		if (net->RTO == 0) {
2190 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2191 		} else {
2192 			to_ticks = MSEC_TO_TICKS(net->RTO);
2193 		}
2194 		tmr = &stcb->asoc.asconf_timer;
2195 		break;
2196 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2197 		if ((stcb == NULL) || (net != NULL)) {
2198 			return;
2199 		}
2200 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2201 		tmr = &stcb->asoc.delete_prim_timer;
2202 		break;
2203 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2204 		if (stcb == NULL) {
2205 			return;
2206 		}
2207 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2208 			/*
2209 			 * Really an error since stcb is NOT set to
2210 			 * autoclose
2211 			 */
2212 			return;
2213 		}
2214 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2215 		tmr = &stcb->asoc.autoclose_timer;
2216 		break;
2217 	default:
2218 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2219 		    __FUNCTION__, t_type);
2220 		return;
2221 		break;
2222 	};
2223 	if ((to_ticks <= 0) || (tmr == NULL)) {
2224 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2225 		    __FUNCTION__, t_type, to_ticks, tmr);
2226 		return;
2227 	}
2228 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2229 		/*
2230 		 * we do NOT allow you to have it already running. if it is
2231 		 * we leave the current one up unchanged
2232 		 */
2233 		return;
2234 	}
2235 	/* At this point we can proceed */
2236 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2237 		stcb->asoc.num_send_timers_up++;
2238 	}
2239 	tmr->stopped_from = 0;
2240 	tmr->type = t_type;
2241 	tmr->ep = (void *)inp;
2242 	tmr->tcb = (void *)stcb;
2243 	tmr->net = (void *)net;
2244 	tmr->self = (void *)tmr;
2245 	tmr->vnet = (void *)curvnet;
2246 	tmr->ticks = sctp_get_tick_count();
2247 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2248 	return;
2249 }
2250 
/*
 * Stop the timer of type t_type that belongs to the given endpoint,
 * association and/or destination.  "from" is recorded in
 * tmr->stopped_from so post-mortem debugging can tell where the stop
 * originated.  Several timer structures are shared between timer types
 * (e.g. strreset/asockill, signature_change/newcookie/inpkill); if the
 * located timer is currently running as a different type it is left
 * untouched.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* All timer types except the address work-queue timer need inp. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the structure that carries it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the association's count of running SEND timers accurate. */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2414 
2415 uint32_t
2416 sctp_calculate_len(struct mbuf *m)
2417 {
2418 	uint32_t tlen = 0;
2419 	struct mbuf *at;
2420 
2421 	at = m;
2422 	while (at) {
2423 		tlen += SCTP_BUF_LEN(at);
2424 		at = SCTP_BUF_NEXT(at);
2425 	}
2426 	return (tlen);
2427 }
2428 
2429 void
2430 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2431     struct sctp_association *asoc, uint32_t mtu)
2432 {
2433 	/*
2434 	 * Reset the P-MTU size on this association, this involves changing
2435 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2436 	 * allow the DF flag to be cleared.
2437 	 */
2438 	struct sctp_tmit_chunk *chk;
2439 	unsigned int eff_mtu, ovh;
2440 
2441 	asoc->smallest_mtu = mtu;
2442 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2443 		ovh = SCTP_MIN_OVERHEAD;
2444 	} else {
2445 		ovh = SCTP_MIN_V4_OVERHEAD;
2446 	}
2447 	eff_mtu = mtu - ovh;
2448 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2449 		if (chk->send_size > eff_mtu) {
2450 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2451 		}
2452 	}
2453 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2454 		if (chk->send_size > eff_mtu) {
2455 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2456 		}
2457 	}
2458 }
2459 
2460 
2461 /*
2462  * given an association and starting time of the current RTT period return
2463  * RTO in number of msecs net should point to the current network
2464  */
2465 
2466 uint32_t
2467 sctp_calculate_rto(struct sctp_tcb *stcb,
2468     struct sctp_association *asoc,
2469     struct sctp_nets *net,
2470     struct timeval *told,
2471     int safe, int rtt_from_sack)
2472 {
2473 	/*-
2474 	 * given an association and the starting time of the current RTT
2475 	 * period (in value1/value2) return RTO in number of msecs.
2476 	 */
2477 	int32_t rtt;		/* RTT in ms */
2478 	uint32_t new_rto;
2479 	int first_measure = 0;
2480 	struct timeval now, then, *old;
2481 
2482 	/* Copy it out for sparc64 */
2483 	if (safe == sctp_align_unsafe_makecopy) {
2484 		old = &then;
2485 		memcpy(&then, told, sizeof(struct timeval));
2486 	} else if (safe == sctp_align_safe_nocopy) {
2487 		old = told;
2488 	} else {
2489 		/* error */
2490 		SCTP_PRINTF("Huh, bad rto calc call\n");
2491 		return (0);
2492 	}
2493 	/************************/
2494 	/* 1. calculate new RTT */
2495 	/************************/
2496 	/* get the current time */
2497 	if (stcb->asoc.use_precise_time) {
2498 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2499 	} else {
2500 		(void)SCTP_GETTIME_TIMEVAL(&now);
2501 	}
2502 	timevalsub(&now, old);
2503 	/* store the current RTT in us */
2504 	net->rtt = (uint64_t) 10000000 *(uint64_t) now.tv_sec +
2505 	         (uint64_t) now.tv_usec;
2506 
2507 	/* computer rtt in ms */
2508 	rtt = net->rtt / 1000;
2509 	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
2510 		/*
2511 		 * Tell the CC module that a new update has just occurred
2512 		 * from a sack
2513 		 */
2514 		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
2515 	}
2516 	/*
2517 	 * Do we need to determine the lan? We do this only on sacks i.e.
2518 	 * RTT being determined from data not non-data (HB/INIT->INITACK).
2519 	 */
2520 	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
2521 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2522 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2523 			net->lan_type = SCTP_LAN_INTERNET;
2524 		} else {
2525 			net->lan_type = SCTP_LAN_LOCAL;
2526 		}
2527 	}
2528 	/***************************/
2529 	/* 2. update RTTVAR & SRTT */
2530 	/***************************/
2531 	/*-
2532 	 * Compute the scaled average lastsa and the
2533 	 * scaled variance lastsv as described in van Jacobson
2534 	 * Paper "Congestion Avoidance and Control", Annex A.
2535 	 *
2536 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2537 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2538 	 */
2539 	if (net->RTO_measured) {
2540 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2541 		net->lastsa += rtt;
2542 		if (rtt < 0) {
2543 			rtt = -rtt;
2544 		}
2545 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2546 		net->lastsv += rtt;
2547 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2548 			rto_logging(net, SCTP_LOG_RTTVAR);
2549 		}
2550 	} else {
2551 		/* First RTO measurment */
2552 		net->RTO_measured = 1;
2553 		first_measure = 1;
2554 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2555 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2556 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2557 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2558 		}
2559 	}
2560 	if (net->lastsv == 0) {
2561 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2562 	}
2563 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2564 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2565 	    (stcb->asoc.sat_network_lockout == 0)) {
2566 		stcb->asoc.sat_network = 1;
2567 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2568 		stcb->asoc.sat_network = 0;
2569 		stcb->asoc.sat_network_lockout = 1;
2570 	}
2571 	/* bound it, per C6/C7 in Section 5.3.1 */
2572 	if (new_rto < stcb->asoc.minrto) {
2573 		new_rto = stcb->asoc.minrto;
2574 	}
2575 	if (new_rto > stcb->asoc.maxrto) {
2576 		new_rto = stcb->asoc.maxrto;
2577 	}
2578 	/* we are now returning the RTO */
2579 	return (new_rto);
2580 }
2581 
2582 /*
2583  * return a pointer to a contiguous piece of data from the given mbuf chain
2584  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2585  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2586  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2587  */
2588 caddr_t
2589 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2590 {
2591 	uint32_t count;
2592 	uint8_t *ptr;
2593 
2594 	ptr = in_ptr;
2595 	if ((off < 0) || (len <= 0))
2596 		return (NULL);
2597 
2598 	/* find the desired start location */
2599 	while ((m != NULL) && (off > 0)) {
2600 		if (off < SCTP_BUF_LEN(m))
2601 			break;
2602 		off -= SCTP_BUF_LEN(m);
2603 		m = SCTP_BUF_NEXT(m);
2604 	}
2605 	if (m == NULL)
2606 		return (NULL);
2607 
2608 	/* is the current mbuf large enough (eg. contiguous)? */
2609 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2610 		return (mtod(m, caddr_t)+off);
2611 	} else {
2612 		/* else, it spans more than one mbuf, so save a temp copy... */
2613 		while ((m != NULL) && (len > 0)) {
2614 			count = min(SCTP_BUF_LEN(m) - off, len);
2615 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2616 			len -= count;
2617 			ptr += count;
2618 			off = 0;
2619 			m = SCTP_BUF_NEXT(m);
2620 		}
2621 		if ((m == NULL) && (len > 0))
2622 			return (NULL);
2623 		else
2624 			return ((caddr_t)in_ptr);
2625 	}
2626 }
2627 
2628 
2629 
2630 struct sctp_paramhdr *
2631 sctp_get_next_param(struct mbuf *m,
2632     int offset,
2633     struct sctp_paramhdr *pull,
2634     int pull_limit)
2635 {
2636 	/* This just provides a typed signature to Peter's Pull routine */
2637 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2638 	    (uint8_t *) pull));
2639 }
2640 
2641 
2642 int
2643 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2644 {
2645 	/*
2646 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2647 	 * padlen is > 3 this routine will fail.
2648 	 */
2649 	uint8_t *dp;
2650 	int i;
2651 
2652 	if (padlen > 3) {
2653 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2654 		return (ENOBUFS);
2655 	}
2656 	if (padlen <= M_TRAILINGSPACE(m)) {
2657 		/*
2658 		 * The easy way. We hope the majority of the time we hit
2659 		 * here :)
2660 		 */
2661 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2662 		SCTP_BUF_LEN(m) += padlen;
2663 	} else {
2664 		/* Hard way we must grow the mbuf */
2665 		struct mbuf *tmp;
2666 
2667 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2668 		if (tmp == NULL) {
2669 			/* Out of space GAK! we are in big trouble. */
2670 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2671 			return (ENOSPC);
2672 		}
2673 		/* setup and insert in middle */
2674 		SCTP_BUF_LEN(tmp) = padlen;
2675 		SCTP_BUF_NEXT(tmp) = NULL;
2676 		SCTP_BUF_NEXT(m) = tmp;
2677 		dp = mtod(tmp, uint8_t *);
2678 	}
2679 	/* zero out the pad */
2680 	for (i = 0; i < padlen; i++) {
2681 		*dp = 0;
2682 		dp++;
2683 	}
2684 	return (0);
2685 }
2686 
2687 int
2688 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2689 {
2690 	/* find the last mbuf in chain and pad it */
2691 	struct mbuf *m_at;
2692 
2693 	m_at = m;
2694 	if (last_mbuf) {
2695 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2696 	} else {
2697 		while (m_at) {
2698 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2699 				return (sctp_add_pad_tombuf(m_at, padval));
2700 			}
2701 			m_at = SCTP_BUF_NEXT(m_at);
2702 		}
2703 	}
2704 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2705 	return (EFAULT);
2706 }
2707 
/*
 * Queue an SCTP_ASSOC_CHANGE notification (new state "event", cause
 * code "error") on the socket's read queue.  For one-to-one style (or
 * connected one-to-many) sockets, a COMM_LOST/CANT_STR_ASSOC event
 * also sets so_error (ECONNREFUSED while still in COOKIE_WAIT,
 * ECONNRESET otherwise) and wakes any sleepers.  "data" is unused in
 * this implementation.  so_locked says whether the caller already
 * holds the socket lock (only consulted on platforms built with
 * socket-lock support).
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Hold a refcount on the stcb while the TCB lock
			 * is dropped so the socket lock can be taken in
			 * the correct order; re-check for a close after
			 * re-locking.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		socantrcvmore(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Fill in the sctp_assoc_change notification body. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Same lock-order dance as above. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2825 
/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification on the socket's read
 * queue, reporting that peer address "sa" moved to "state" with cause
 * code "error".  Silently does nothing if the user has not enabled
 * SCTP_PCB_FLAGS_RECVPADDREVNT or if no mbuf/readq entry can be
 * allocated.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address; fix up IPv6 link-local scope ids for the user. */
	switch (sa->sa_family) {
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
2900 
2901 
/*
 * Queue an SCTP_SEND_FAILED notification for a chunk that could not be
 * delivered.  error selects SCTP_DATA_UNSENT vs SCTP_DATA_SENT in the
 * flags.  The chunk's user data (with the SCTP DATA chunk header
 * trimmed off) is appended to the notification and ownership of
 * chk->data is taken from the caller (chk->data is set to NULL).
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * Reported length: notification header plus the user data, minus
	 * the DATA chunk header that gets trimmed off below.
	 */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	/* Chain the user data behind the notification header. */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2983 
2984 
/*
 * Queue an SCTP_SEND_FAILED notification for a message still sitting
 * on a stream queue (not yet chunked).  Like sctp_notify_send_failed()
 * but sources the message info from a stream_queue_pending entry;
 * ownership of sp->data is taken (sp->data is set to NULL).
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* Reported length: notification header plus the queued user data. */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* Part of the message was already chunked and taken. */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* Chain the user data behind the notification header. */
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3057 
3058 
3059 
3060 static void
3061 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3062     uint32_t error)
3063 {
3064 	struct mbuf *m_notify;
3065 	struct sctp_adaptation_event *sai;
3066 	struct sctp_queued_to_read *control;
3067 
3068 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3069 		/* event not enabled */
3070 		return;
3071 	}
3072 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3073 	if (m_notify == NULL)
3074 		/* no space left */
3075 		return;
3076 	SCTP_BUF_LEN(m_notify) = 0;
3077 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3078 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3079 	sai->sai_flags = 0;
3080 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3081 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3082 	sai->sai_assoc_id = sctp_get_associd(stcb);
3083 
3084 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3085 	SCTP_BUF_NEXT(m_notify) = NULL;
3086 
3087 	/* append to socket */
3088 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3089 	    0, 0, 0, 0, 0, 0,
3090 	    m_notify);
3091 	if (control == NULL) {
3092 		/* no memory */
3093 		sctp_m_freem(m_notify);
3094 		return;
3095 	}
3096 	control->length = SCTP_BUF_LEN(m_notify);
3097 	control->spec_flags = M_NOTIFICATION;
3098 	/* not that we need this */
3099 	control->tail_mbuf = m_notify;
3100 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3101 	    control,
3102 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3103 }
3104 
3105 /* This always must be called with the read-queue LOCKED in the INP */
3106 static void
3107 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3108     uint32_t val, int so_locked
3109 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3110     SCTP_UNUSED
3111 #endif
3112 )
3113 {
3114 	struct mbuf *m_notify;
3115 	struct sctp_pdapi_event *pdapi;
3116 	struct sctp_queued_to_read *control;
3117 	struct sockbuf *sb;
3118 
3119 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3120 		/* event not enabled */
3121 		return;
3122 	}
3123 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3124 		return;
3125 	}
3126 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3127 	if (m_notify == NULL)
3128 		/* no space left */
3129 		return;
3130 	SCTP_BUF_LEN(m_notify) = 0;
3131 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3132 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3133 	pdapi->pdapi_flags = 0;
3134 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3135 	pdapi->pdapi_indication = error;
3136 	pdapi->pdapi_stream = (val >> 16);
3137 	pdapi->pdapi_seq = (val & 0x0000ffff);
3138 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3139 
3140 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3141 	SCTP_BUF_NEXT(m_notify) = NULL;
3142 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3143 	    0, 0, 0, 0, 0, 0,
3144 	    m_notify);
3145 	if (control == NULL) {
3146 		/* no memory */
3147 		sctp_m_freem(m_notify);
3148 		return;
3149 	}
3150 	control->spec_flags = M_NOTIFICATION;
3151 	control->length = SCTP_BUF_LEN(m_notify);
3152 	/* not that we need this */
3153 	control->tail_mbuf = m_notify;
3154 	control->held_length = 0;
3155 	control->length = 0;
3156 	sb = &stcb->sctp_socket->so_rcv;
3157 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3158 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3159 	}
3160 	sctp_sballoc(stcb, sb, m_notify);
3161 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3162 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3163 	}
3164 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3165 	control->end_added = 1;
3166 	if (stcb->asoc.control_pdapi)
3167 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3168 	else {
3169 		/* we really should not see this case */
3170 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3171 	}
3172 	if (stcb->sctp_ep && stcb->sctp_socket) {
3173 		/* This should always be the case */
3174 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3175 		struct socket *so;
3176 
3177 		so = SCTP_INP_SO(stcb->sctp_ep);
3178 		if (!so_locked) {
3179 			atomic_add_int(&stcb->asoc.refcnt, 1);
3180 			SCTP_TCB_UNLOCK(stcb);
3181 			SCTP_SOCKET_LOCK(so, 1);
3182 			SCTP_TCB_LOCK(stcb);
3183 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3184 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3185 				SCTP_SOCKET_UNLOCK(so, 1);
3186 				return;
3187 			}
3188 		}
3189 #endif
3190 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3191 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3192 		if (!so_locked) {
3193 			SCTP_SOCKET_UNLOCK(so, 1);
3194 		}
3195 #endif
3196 	}
3197 }
3198 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification.  For one-to-one style
 * sockets (and one-to-many sockets in the TCP pool) the socket is first
 * marked as unable to send, mirroring TCP's half-close semantics; the
 * notification itself is then queued on the receive buffer if the
 * application subscribed to it.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock-order dance: hold a refcount, drop the TCB lock,
		 * take the socket lock, then re-take the TCB lock.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* the socket was closed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3266 
3267 static void
3268 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3269     int so_locked
3270 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3271     SCTP_UNUSED
3272 #endif
3273 )
3274 {
3275 	struct mbuf *m_notify;
3276 	struct sctp_sender_dry_event *event;
3277 	struct sctp_queued_to_read *control;
3278 
3279 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3280 		/* event not enabled */
3281 		return;
3282 	}
3283 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3284 	if (m_notify == NULL) {
3285 		/* no space left */
3286 		return;
3287 	}
3288 	SCTP_BUF_LEN(m_notify) = 0;
3289 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3290 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3291 	event->sender_dry_flags = 0;
3292 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3293 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3294 
3295 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3296 	SCTP_BUF_NEXT(m_notify) = NULL;
3297 
3298 	/* append to socket */
3299 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3300 	    0, 0, 0, 0, 0, 0, m_notify);
3301 	if (control == NULL) {
3302 		/* no memory */
3303 		sctp_m_freem(m_notify);
3304 		return;
3305 	}
3306 	control->length = SCTP_BUF_LEN(m_notify);
3307 	control->spec_flags = M_NOTIFICATION;
3308 	/* not that we need this */
3309 	control->tail_mbuf = m_notify;
3310 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3311 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3312 }
3313 
3314 
3315 static void
3316 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3317 {
3318 	struct mbuf *m_notify;
3319 	struct sctp_queued_to_read *control;
3320 	struct sctp_stream_reset_event *strreset;
3321 	int len;
3322 
3323 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3324 		/* event not enabled */
3325 		return;
3326 	}
3327 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3328 	if (m_notify == NULL)
3329 		/* no space left */
3330 		return;
3331 	SCTP_BUF_LEN(m_notify) = 0;
3332 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3333 	if (len > M_TRAILINGSPACE(m_notify)) {
3334 		/* never enough room */
3335 		sctp_m_freem(m_notify);
3336 		return;
3337 	}
3338 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3339 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3340 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3341 	strreset->strreset_length = len;
3342 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3343 	strreset->strreset_list[0] = number_entries;
3344 
3345 	SCTP_BUF_LEN(m_notify) = len;
3346 	SCTP_BUF_NEXT(m_notify) = NULL;
3347 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3348 		/* no space */
3349 		sctp_m_freem(m_notify);
3350 		return;
3351 	}
3352 	/* append to socket */
3353 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3354 	    0, 0, 0, 0, 0, 0,
3355 	    m_notify);
3356 	if (control == NULL) {
3357 		/* no memory */
3358 		sctp_m_freem(m_notify);
3359 		return;
3360 	}
3361 	control->spec_flags = M_NOTIFICATION;
3362 	control->length = SCTP_BUF_LEN(m_notify);
3363 	/* not that we need this */
3364 	control->tail_mbuf = m_notify;
3365 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3366 	    control,
3367 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3368 }
3369 
3370 
3371 static void
3372 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3373     int number_entries, uint16_t * list, int flag)
3374 {
3375 	struct mbuf *m_notify;
3376 	struct sctp_queued_to_read *control;
3377 	struct sctp_stream_reset_event *strreset;
3378 	int len;
3379 
3380 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3381 		/* event not enabled */
3382 		return;
3383 	}
3384 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3385 	if (m_notify == NULL)
3386 		/* no space left */
3387 		return;
3388 	SCTP_BUF_LEN(m_notify) = 0;
3389 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3390 	if (len > M_TRAILINGSPACE(m_notify)) {
3391 		/* never enough room */
3392 		sctp_m_freem(m_notify);
3393 		return;
3394 	}
3395 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3396 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3397 	if (number_entries == 0) {
3398 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3399 	} else {
3400 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3401 	}
3402 	strreset->strreset_length = len;
3403 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3404 	if (number_entries) {
3405 		int i;
3406 
3407 		for (i = 0; i < number_entries; i++) {
3408 			strreset->strreset_list[i] = ntohs(list[i]);
3409 		}
3410 	}
3411 	SCTP_BUF_LEN(m_notify) = len;
3412 	SCTP_BUF_NEXT(m_notify) = NULL;
3413 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3414 		/* no space */
3415 		sctp_m_freem(m_notify);
3416 		return;
3417 	}
3418 	/* append to socket */
3419 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3420 	    0, 0, 0, 0, 0, 0,
3421 	    m_notify);
3422 	if (control == NULL) {
3423 		/* no memory */
3424 		sctp_m_freem(m_notify);
3425 		return;
3426 	}
3427 	control->spec_flags = M_NOTIFICATION;
3428 	control->length = SCTP_BUF_LEN(m_notify);
3429 	/* not that we need this */
3430 	control->tail_mbuf = m_notify;
3431 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3432 	    control,
3433 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3434 }
3435 
3436 
3437 void
3438 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3439     uint32_t error, void *data, int so_locked
3440 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3441     SCTP_UNUSED
3442 #endif
3443 )
3444 {
3445 	if ((stcb == NULL) ||
3446 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3447 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3448 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3449 		/* If the socket is gone we are out of here */
3450 		return;
3451 	}
3452 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3453 		return;
3454 	}
3455 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3456 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3457 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3458 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3459 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3460 			/* Don't report these in front states */
3461 			return;
3462 		}
3463 	}
3464 	switch (notification) {
3465 	case SCTP_NOTIFY_ASSOC_UP:
3466 		if (stcb->asoc.assoc_up_sent == 0) {
3467 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3468 			stcb->asoc.assoc_up_sent = 1;
3469 		}
3470 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3471 			sctp_notify_adaptation_layer(stcb, error);
3472 		}
3473 		if (stcb->asoc.peer_supports_auth == 0) {
3474 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3475 			    NULL, so_locked);
3476 		}
3477 		break;
3478 	case SCTP_NOTIFY_ASSOC_DOWN:
3479 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3480 		break;
3481 	case SCTP_NOTIFY_INTERFACE_DOWN:
3482 		{
3483 			struct sctp_nets *net;
3484 
3485 			net = (struct sctp_nets *)data;
3486 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3487 			    (struct sockaddr *)&net->ro._l_addr, error);
3488 			break;
3489 		}
3490 	case SCTP_NOTIFY_INTERFACE_UP:
3491 		{
3492 			struct sctp_nets *net;
3493 
3494 			net = (struct sctp_nets *)data;
3495 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3496 			    (struct sockaddr *)&net->ro._l_addr, error);
3497 			break;
3498 		}
3499 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3500 		{
3501 			struct sctp_nets *net;
3502 
3503 			net = (struct sctp_nets *)data;
3504 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3505 			    (struct sockaddr *)&net->ro._l_addr, error);
3506 			break;
3507 		}
3508 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3509 		sctp_notify_send_failed2(stcb, error,
3510 		    (struct sctp_stream_queue_pending *)data, so_locked);
3511 		break;
3512 	case SCTP_NOTIFY_DG_FAIL:
3513 		sctp_notify_send_failed(stcb, error,
3514 		    (struct sctp_tmit_chunk *)data, so_locked);
3515 		break;
3516 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3517 		{
3518 			uint32_t val;
3519 
3520 			val = *((uint32_t *) data);
3521 
3522 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3523 			break;
3524 		}
3525 	case SCTP_NOTIFY_STRDATA_ERR:
3526 		break;
3527 	case SCTP_NOTIFY_ASSOC_ABORTED:
3528 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3529 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3530 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3531 		} else {
3532 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3533 		}
3534 		break;
3535 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3536 		break;
3537 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3538 		break;
3539 	case SCTP_NOTIFY_ASSOC_RESTART:
3540 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3541 		if (stcb->asoc.peer_supports_auth == 0) {
3542 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3543 			    NULL, so_locked);
3544 		}
3545 		break;
3546 	case SCTP_NOTIFY_HB_RESP:
3547 		break;
3548 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3549 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3550 		break;
3551 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3552 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3553 		break;
3554 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3555 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3556 		break;
3557 
3558 	case SCTP_NOTIFY_STR_RESET_SEND:
3559 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3560 		break;
3561 	case SCTP_NOTIFY_STR_RESET_RECV:
3562 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3563 		break;
3564 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3565 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3566 		break;
3567 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3568 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3569 		break;
3570 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3571 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3572 		    error);
3573 		break;
3574 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3575 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3576 		    error);
3577 		break;
3578 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3579 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3580 		    error);
3581 		break;
3582 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3583 		break;
3584 	case SCTP_NOTIFY_ASCONF_FAILED:
3585 		break;
3586 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3587 		sctp_notify_shutdown_event(stcb);
3588 		break;
3589 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3590 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3591 		    (uint16_t) (uintptr_t) data,
3592 		    so_locked);
3593 		break;
3594 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3595 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3596 		    (uint16_t) (uintptr_t) data,
3597 		    so_locked);
3598 		break;
3599 	case SCTP_NOTIFY_NO_PEER_AUTH:
3600 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3601 		    (uint16_t) (uintptr_t) data,
3602 		    so_locked);
3603 		break;
3604 	case SCTP_NOTIFY_SENDER_DRY:
3605 		sctp_notify_sender_dry_event(stcb, so_locked);
3606 		break;
3607 	default:
3608 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3609 		    __FUNCTION__, notification, notification);
3610 		break;
3611 	}			/* end switch */
3612 }
3613 
/*
 * Drain every outbound queue of the association (sent queue, send queue,
 * and all per-stream output queues), notifying the ULP with a send-failed
 * event for each message that still carried data, and freeing the chunks.
 * Used when the association is being torn down.
 *
 * holds_lock - non-zero if the caller already holds the TCB send lock.
 * so_locked  - socket-lock state, only meaningful on Apple/lock-testing
 *              builds.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/*
			 * The notify may steal chk->data (chaining it into
			 * the notification), hence the re-check below.
			 */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/*
				 * The notify may steal sp->data; re-check
				 * and free whatever is left.
				 */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3707 
3708 void
3709 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3710 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3711     SCTP_UNUSED
3712 #endif
3713 )
3714 {
3715 
3716 	if (stcb == NULL) {
3717 		return;
3718 	}
3719 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3720 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3721 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3722 		return;
3723 	}
3724 	/* Tell them we lost the asoc */
3725 	sctp_report_all_outbound(stcb, 1, so_locked);
3726 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3727 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3728 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3729 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3730 	}
3731 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3732 }
3733 
/*
 * Abort an association in response to an inbound packet: notify the ULP
 * (when a TCB exists), send an ABORT to the peer using the peer's verification
 * tag, and free the TCB.
 *
 * inp/stcb - endpoint and (possibly NULL) association being aborted.
 * m/iphlen/sh - the inbound packet, IP header length, and SCTP header,
 *               used to address the outgoing ABORT.
 * op_err   - optional operational error cause chain to include.
 * vrf_id   - VRF to send on; overridden by the assoc's VRF when stcb != NULL.
 * port     - UDP encapsulation port (0 when not encapsulated).
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock-order dance: hold a refcount, drop the TCB lock,
		 * take the socket lock, then re-take the TCB lock before
		 * freeing the association.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* an established assoc going away decrements the gauge */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3777 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the inbound and outbound TSN track logs of an
 * association (circular buffers of SCTP_TSN_LOG_SIZE entries).  The entire
 * body is compiled out unless NOSIY_PRINTS is defined.
 *
 * NOTE(review): "NOSIY_PRINTS" looks like a misspelling of "NOISY_PRINTS";
 * left as-is since changing the guard could enable code for builds that
 * define the correctly-spelled macro - confirm before renaming.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* when wrapped, the oldest entries run from tsn_in_at to the end */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	/* then the newer entries from the start of the ring to tsn_in_at */
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3841 
/*
 * Abort an association from the local side: notify the ULP, send an ABORT
 * chunk to the peer (with the supplied operational error causes), update
 * statistics, and free the TCB.  With stcb == NULL only the endpoint
 * cleanup for a gone socket is attempted.
 *
 * so_locked - socket-lock state, only meaningful on Apple/lock-testing
 *             builds.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last assoc is gone; free the endpoint */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* an established assoc going away decrements the gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock-order dance: hold a refcount, drop the TCB lock, take the
	 * socket lock, then re-take the TCB lock before freeing.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3907 
3908 void
3909 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3910     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3911 {
3912 	struct sctp_chunkhdr *ch, chunk_buf;
3913 	unsigned int chk_length;
3914 
3915 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3916 	/* Generate a TO address for future reference */
3917 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3918 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3919 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3920 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3921 		}
3922 	}
3923 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3924 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3925 	while (ch != NULL) {
3926 		chk_length = ntohs(ch->chunk_length);
3927 		if (chk_length < sizeof(*ch)) {
3928 			/* break to abort land */
3929 			break;
3930 		}
3931 		switch (ch->chunk_type) {
3932 		case SCTP_COOKIE_ECHO:
3933 			/* We hit here only if the assoc is being freed */
3934 			return;
3935 		case SCTP_PACKET_DROPPED:
3936 			/* we don't respond to pkt-dropped */
3937 			return;
3938 		case SCTP_ABORT_ASSOCIATION:
3939 			/* we don't respond with an ABORT to an ABORT */
3940 			return;
3941 		case SCTP_SHUTDOWN_COMPLETE:
3942 			/*
3943 			 * we ignore it since we are not waiting for it and
3944 			 * peer is gone
3945 			 */
3946 			return;
3947 		case SCTP_SHUTDOWN_ACK:
3948 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
3949 			return;
3950 		default:
3951 			break;
3952 		}
3953 		offset += SCTP_SIZE32(chk_length);
3954 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3955 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3956 	}
3957 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
3958 }
3959 
3960 /*
3961  * check the inbound datagram to make sure there is not an abort inside it,
3962  * if there is return 1, else return 0.
3963  */
3964 int
3965 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3966 {
3967 	struct sctp_chunkhdr *ch;
3968 	struct sctp_init_chunk *init_chk, chunk_buf;
3969 	int offset;
3970 	unsigned int chk_length;
3971 
3972 	offset = iphlen + sizeof(struct sctphdr);
3973 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3974 	    (uint8_t *) & chunk_buf);
3975 	while (ch != NULL) {
3976 		chk_length = ntohs(ch->chunk_length);
3977 		if (chk_length < sizeof(*ch)) {
3978 			/* packet is probably corrupt */
3979 			break;
3980 		}
3981 		/* we seem to be ok, is it an abort? */
3982 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
3983 			/* yep, tell them */
3984 			return (1);
3985 		}
3986 		if (ch->chunk_type == SCTP_INITIATION) {
3987 			/* need to update the Vtag */
3988 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
3989 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
3990 			if (init_chk != NULL) {
3991 				*vtagfill = ntohl(init_chk->init.initiate_tag);
3992 			}
3993 		}
3994 		/* Nope, move to the next chunk */
3995 		offset += SCTP_SIZE32(chk_length);
3996 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3997 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3998 	}
3999 	return (0);
4000 }
4001 
4002 /*
4003  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4004  * set (i.e. it's 0) so, create this function to compare link local scopes
4005  */
4006 #ifdef INET6
4007 uint32_t
4008 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4009 {
4010 	struct sockaddr_in6 a, b;
4011 
4012 	/* save copies */
4013 	a = *addr1;
4014 	b = *addr2;
4015 
4016 	if (a.sin6_scope_id == 0)
4017 		if (sa6_recoverscope(&a)) {
4018 			/* can't get scope, so can't match */
4019 			return (0);
4020 		}
4021 	if (b.sin6_scope_id == 0)
4022 		if (sa6_recoverscope(&b)) {
4023 			/* can't get scope, so can't match */
4024 			return (0);
4025 		}
4026 	if (a.sin6_scope_id != b.sin6_scope_id)
4027 		return (0);
4028 
4029 	return (1);
4030 }
4031 
4032 /*
4033  * returns a sockaddr_in6 with embedded scope recovered and removed
4034  */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/*
	 * For a link-local IPv6 address: if no sin6_scope_id is set,
	 * recover it into *store (leaving the caller's addr untouched)
	 * and return store; otherwise strip the scope embedded in the
	 * address bytes.  NOTE(review): the else branch clears the
	 * embedded scope through the caller's pointer, i.e. it mutates
	 * *addr in place.  Non-link-local or non-INET6 input is returned
	 * unchanged.
	 */
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
4055 
4056 #endif
4057 
4058 /*
4059  * are the two addresses the same?  currently a "scopeless" check returns: 1
4060  * if same, 0 if not
4061  */
4062 int
4063 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4064 {
4065 
4066 	/* must be valid */
4067 	if (sa1 == NULL || sa2 == NULL)
4068 		return (0);
4069 
4070 	/* must be the same family */
4071 	if (sa1->sa_family != sa2->sa_family)
4072 		return (0);
4073 
4074 	switch (sa1->sa_family) {
4075 #ifdef INET6
4076 	case AF_INET6:
4077 		{
4078 			/* IPv6 addresses */
4079 			struct sockaddr_in6 *sin6_1, *sin6_2;
4080 
4081 			sin6_1 = (struct sockaddr_in6 *)sa1;
4082 			sin6_2 = (struct sockaddr_in6 *)sa2;
4083 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4084 			    sin6_2));
4085 		}
4086 #endif
4087 	case AF_INET:
4088 		{
4089 			/* IPv4 addresses */
4090 			struct sockaddr_in *sin_1, *sin_2;
4091 
4092 			sin_1 = (struct sockaddr_in *)sa1;
4093 			sin_2 = (struct sockaddr_in *)sa2;
4094 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4095 		}
4096 	default:
4097 		/* we don't do these... */
4098 		return (0);
4099 	}
4100 }
4101 
4102 void
4103 sctp_print_address(struct sockaddr *sa)
4104 {
4105 #ifdef INET6
4106 	char ip6buf[INET6_ADDRSTRLEN];
4107 
4108 	ip6buf[0] = 0;
4109 #endif
4110 
4111 	switch (sa->sa_family) {
4112 #ifdef INET6
4113 	case AF_INET6:
4114 		{
4115 			struct sockaddr_in6 *sin6;
4116 
4117 			sin6 = (struct sockaddr_in6 *)sa;
4118 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4119 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4120 			    ntohs(sin6->sin6_port),
4121 			    sin6->sin6_scope_id);
4122 			break;
4123 		}
4124 #endif
4125 	case AF_INET:
4126 		{
4127 			struct sockaddr_in *sin;
4128 			unsigned char *p;
4129 
4130 			sin = (struct sockaddr_in *)sa;
4131 			p = (unsigned char *)&sin->sin_addr;
4132 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4133 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4134 			break;
4135 		}
4136 	default:
4137 		SCTP_PRINTF("?\n");
4138 		break;
4139 	}
4140 }
4141 
void
sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
{
	/*
	 * Print the source and destination address:port of a packet,
	 * dispatching on the IP version nibble of the header.  Builds
	 * temporary sockaddrs and reuses sctp_print_address().
	 */
	switch (iph->ip_v) {
	case IPVERSION:
		{
			struct sockaddr_in lsa, fsa;

			bzero(&lsa, sizeof(lsa));
			lsa.sin_len = sizeof(lsa);
			lsa.sin_family = AF_INET;
			lsa.sin_addr = iph->ip_src;
			lsa.sin_port = sh->src_port;
			bzero(&fsa, sizeof(fsa));
			fsa.sin_len = sizeof(fsa);
			fsa.sin_family = AF_INET;
			fsa.sin_addr = iph->ip_dst;
			fsa.sin_port = sh->dest_port;
			SCTP_PRINTF("src: ");
			sctp_print_address((struct sockaddr *)&lsa);
			SCTP_PRINTF("dest: ");
			sctp_print_address((struct sockaddr *)&fsa);
			break;
		}
#ifdef INET6
	case IPV6_VERSION >> 4:
		{
			/* re-interpret the v4 header pointer as a v6 header */
			struct ip6_hdr *ip6;
			struct sockaddr_in6 lsa6, fsa6;

			ip6 = (struct ip6_hdr *)iph;
			bzero(&lsa6, sizeof(lsa6));
			lsa6.sin6_len = sizeof(lsa6);
			lsa6.sin6_family = AF_INET6;
			lsa6.sin6_addr = ip6->ip6_src;
			lsa6.sin6_port = sh->src_port;
			bzero(&fsa6, sizeof(fsa6));
			fsa6.sin6_len = sizeof(fsa6);
			fsa6.sin6_family = AF_INET6;
			fsa6.sin6_addr = ip6->ip6_dst;
			fsa6.sin6_port = sh->dest_port;
			SCTP_PRINTF("src: ");
			sctp_print_address((struct sockaddr *)&lsa6);
			SCTP_PRINTF("dest: ");
			sctp_print_address((struct sockaddr *)&fsa6);
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
}
4195 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.  Used by peeloff /
	 * accept: read-queue entries are moved onto a local staging queue
	 * under the old INP's read lock, then appended to the new INP's
	 * read queue, adjusting each socket's receive-buffer accounting
	 * along the way.  waitflags is passed through to sblock().
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit the old socket's receive-buffer accounting */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit the new socket's receive-buffer accounting */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4271 
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 *
	 * Zero-length mbufs in control->data are pruned first; if nothing
	 * remains, the control is freed instead of queued.  'end' marks
	 * the message complete; 'inp_read_lock_held' tells us whether to
	 * take/drop the INP read lock ourselves; 'so_locked' is only
	 * consulted on platforms needing the socket lock for the wakeup.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader is gone; discard the data and the control */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* only real user data counts toward recv statistics */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* walk the chain: drop empty mbufs, account the rest into sb */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* wake up any reader waiting on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * hold a ref across the TCB unlock/relock
				 * needed to take the socket lock in order
				 */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4393 
4394 
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 *
	 * Returns 0 on success, -1 if the control is missing, already
	 * complete, or m is empty.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* reader is gone; silently succeed */
		SCTP_INP_READ_UNLOCK(inp);
		return 0;
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* prune empty mbufs, account the rest into sb (if PDAPI) */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* wake up any reader waiting on the socket */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/* hold a ref across the TCB unlock/relock */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4540 
4541 
4542 
4543 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4544  *************ALTERNATE ROUTING CODE
4545  */
4546 
4547 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4548  *************ALTERNATE ROUTING CODE
4549  */
4550 
4551 struct mbuf *
4552 sctp_generate_invmanparam(int err)
4553 {
4554 	/* Return a MBUF with a invalid mandatory parameter */
4555 	struct mbuf *m;
4556 
4557 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4558 	if (m) {
4559 		struct sctp_paramhdr *ph;
4560 
4561 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4562 		ph = mtod(m, struct sctp_paramhdr *);
4563 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4564 		ph->param_type = htons(err);
4565 	}
4566 	return (m);
4567 }
4568 
4569 #ifdef SCTP_MBCNT_LOGGING
4570 void
4571 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4572     struct sctp_tmit_chunk *tp1, int chk_cnt)
4573 {
4574 	if (tp1->data == NULL) {
4575 		return;
4576 	}
4577 	asoc->chunks_on_out_queue -= chk_cnt;
4578 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4579 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4580 		    asoc->total_output_queue_size,
4581 		    tp1->book_size,
4582 		    0,
4583 		    tp1->mbcnt);
4584 	}
4585 	if (asoc->total_output_queue_size >= tp1->book_size) {
4586 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4587 	} else {
4588 		asoc->total_output_queue_size = 0;
4589 	}
4590 
4591 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4592 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4593 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4594 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4595 		} else {
4596 			stcb->sctp_socket->so_snd.sb_cc = 0;
4597 
4598 		}
4599 	}
4600 }
4601 
4602 #endif
4603 
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    int reason, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Abandon a PR-SCTP message starting at chunk tp1: release tp1 and
	 * every other fragment of the same message (first on the sent
	 * queue, then the send queue, then any remainder still on the
	 * stream out queue), notify the ULP with 'reason', and mark the
	 * TSNs SCTP_FORWARD_TSN_SKIP so a FORWARD-TSN can cover them.
	 * Returns the number of bytes released.
	 */
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint16_t stream = 0, seq = 0;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	stream = tp1->rec.data.stream_number;
	seq = tp1->rec.data.stream_seq;
	/* Pass 1: walk the fragments from tp1 on the sent queue. */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				/* still in flight: back it out of cwnd accounting */
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done   */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.  Pass 2: continue on the send queue.
		 */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.stream_number != stream) ||
			    (tp1->rec.data.stream_seq != seq)) {
				break;
			}
			/*
			 * save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one we
			 * don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/*
			 * on to the sent queue so we can wait for it to be
			 * passed by.
			 */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there is stuff left on the
		 * stream out queue.. yuck.  Pass 3: discard the pending
		 * remainder of the message from the stream queue.
		 */
		strq = &stcb->asoc.strmout[stream];
		SCTP_TCB_SEND_LOCK(stcb);
		TAILQ_FOREACH(sp, &strq->outqueue, next) {
			/* FIXME: Shouldn't this be a serial number check? */
			if (sp->strseq > seq) {
				break;
			}
			/* Check if its our SEQ */
			if (sp->strseq == seq) {
				sp->discard_rest = 1;
				/*
				 * We may need to put a chunk on the queue
				 * that holds the TSN that would have been
				 * sent with the LAST bit.
				 */
				if (chk == NULL) {
					/* Yep, we have to */
					sctp_alloc_a_chunk(stcb, chk);
					if (chk == NULL) {
						/*
						 * we are hosed. All we can
						 * do is nothing.. which
						 * will cause an abort if
						 * the peer is paying
						 * attention.
						 */
						goto oh_well;
					}
					memset(chk, 0, sizeof(*chk));
					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
					chk->sent = SCTP_FORWARD_TSN_SKIP;
					chk->asoc = &stcb->asoc;
					chk->rec.data.stream_seq = sp->strseq;
					chk->rec.data.stream_number = sp->stream;
					chk->rec.data.payloadtype = sp->ppid;
					chk->rec.data.context = sp->context;
					chk->flags = sp->act_flags;
					if (sp->net)
						chk->whoTo = sp->net;
					else
						chk->whoTo = stcb->asoc.primary_destination;
					atomic_add_int(&chk->whoTo->ref_count, 1);
					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
					stcb->asoc.pr_sctp_cnt++;
					chk->pr_sctp_on = 1;
					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
					stcb->asoc.sent_queue_cnt++;
					stcb->asoc.pr_sctp_cnt++;
				} else {
					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
				}
		oh_well:
				if (sp->data) {
					/*
					 * Pull any data to free up the SB
					 * and allow sender to "add more"
					 * while we will throw away :-)
					 */
					sctp_free_spbufspace(stcb, &stcb->asoc,
					    sp);
					ret_sz += sp->length;
					do_wakeup_routine = 1;
					sp->some_taken = 1;
					sctp_m_freem(sp->data);
					sp->length = 0;
					sp->data = NULL;
					sp->tail_mbuf = NULL;
				}
				break;
			}
		}		/* End tailq_foreach */
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* hold a ref across the TCB unlock/relock */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}
4808 
4809 /*
4810  * checks to see if the given address, sa, is one that is currently known by
4811  * the kernel note: can't distinguish the same address on multiple interfaces
4812  * and doesn't handle multiple addresses with different zone/scope id's note:
4813  * ifa_ifwithaddr() compares the entire sockaddr struct
4814  */
4815 struct sctp_ifa *
4816 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4817     int holds_lock)
4818 {
4819 	struct sctp_laddr *laddr;
4820 
4821 	if (holds_lock == 0) {
4822 		SCTP_INP_RLOCK(inp);
4823 	}
4824 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4825 		if (laddr->ifa == NULL)
4826 			continue;
4827 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4828 			continue;
4829 		if (addr->sa_family == AF_INET) {
4830 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4831 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4832 				/* found him. */
4833 				if (holds_lock == 0) {
4834 					SCTP_INP_RUNLOCK(inp);
4835 				}
4836 				return (laddr->ifa);
4837 				break;
4838 			}
4839 		}
4840 #ifdef INET6
4841 		if (addr->sa_family == AF_INET6) {
4842 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4843 			    &laddr->ifa->address.sin6)) {
4844 				/* found him. */
4845 				if (holds_lock == 0) {
4846 					SCTP_INP_RUNLOCK(inp);
4847 				}
4848 				return (laddr->ifa);
4849 				break;
4850 			}
4851 		}
4852 #endif
4853 	}
4854 	if (holds_lock == 0) {
4855 		SCTP_INP_RUNLOCK(inp);
4856 	}
4857 	return (NULL);
4858 }
4859 
4860 uint32_t
4861 sctp_get_ifa_hash_val(struct sockaddr *addr)
4862 {
4863 	if (addr->sa_family == AF_INET) {
4864 		struct sockaddr_in *sin;
4865 
4866 		sin = (struct sockaddr_in *)addr;
4867 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4868 	} else if (addr->sa_family == AF_INET6) {
4869 		struct sockaddr_in6 *sin6;
4870 		uint32_t hash_of_addr;
4871 
4872 		sin6 = (struct sockaddr_in6 *)addr;
4873 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4874 		    sin6->sin6_addr.s6_addr32[1] +
4875 		    sin6->sin6_addr.s6_addr32[2] +
4876 		    sin6->sin6_addr.s6_addr32[3]);
4877 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4878 		return (hash_of_addr);
4879 	}
4880 	return (0);
4881 }
4882 
4883 struct sctp_ifa *
4884 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4885 {
4886 	struct sctp_ifa *sctp_ifap;
4887 	struct sctp_vrf *vrf;
4888 	struct sctp_ifalist *hash_head;
4889 	uint32_t hash_of_addr;
4890 
4891 	if (holds_lock == 0)
4892 		SCTP_IPI_ADDR_RLOCK();
4893 
4894 	vrf = sctp_find_vrf(vrf_id);
4895 	if (vrf == NULL) {
4896 stage_right:
4897 		if (holds_lock == 0)
4898 			SCTP_IPI_ADDR_RUNLOCK();
4899 		return (NULL);
4900 	}
4901 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4902 
4903 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4904 	if (hash_head == NULL) {
4905 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4906 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4907 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4908 		sctp_print_address(addr);
4909 		SCTP_PRINTF("No such bucket for address\n");
4910 		if (holds_lock == 0)
4911 			SCTP_IPI_ADDR_RUNLOCK();
4912 
4913 		return (NULL);
4914 	}
4915 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4916 		if (sctp_ifap == NULL) {
4917 #ifdef INVARIANTS
4918 			panic("Huh LIST_FOREACH corrupt");
4919 			goto stage_right;
4920 #else
4921 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4922 			goto stage_right;
4923 #endif
4924 		}
4925 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4926 			continue;
4927 		if (addr->sa_family == AF_INET) {
4928 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4929 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4930 				/* found him. */
4931 				if (holds_lock == 0)
4932 					SCTP_IPI_ADDR_RUNLOCK();
4933 				return (sctp_ifap);
4934 				break;
4935 			}
4936 		}
4937 #ifdef INET6
4938 		if (addr->sa_family == AF_INET6) {
4939 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4940 			    &sctp_ifap->address.sin6)) {
4941 				/* found him. */
4942 				if (holds_lock == 0)
4943 					SCTP_IPI_ADDR_RUNLOCK();
4944 				return (sctp_ifap);
4945 				break;
4946 			}
4947 		}
4948 #endif
4949 	}
4950 	if (holds_lock == 0)
4951 		SCTP_IPI_ADDR_RUNLOCK();
4952 	return (NULL);
4953 }
4954 
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/*
	 * User pulled some data, do we need a rwnd update?  Called from the
	 * receive path with *freed_so_far bytes consumed; if the recovered
	 * window grew by at least rwnd_req, send a window-update SACK.
	 * hold_rlock says the caller holds the INP read lock (we drop and
	 * retake it around the SACK).  *freed_so_far is zeroed on the
	 * normal path.
	 */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* keep the assoc alive while we poke at it */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		if (hold_rlock) {
			/* can't send a SACK with the read lock held */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* retake the read lock if we dropped it above */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5036 
5037 int
5038 sctp_sorecvmsg(struct socket *so,
5039     struct uio *uio,
5040     struct mbuf **mp,
5041     struct sockaddr *from,
5042     int fromlen,
5043     int *msg_flags,
5044     struct sctp_sndrcvinfo *sinfo,
5045     int filling_sinfo)
5046 {
5047 	/*
5048 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5049 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5050 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5051 	 * On the way out we may send out any combination of:
5052 	 * MSG_NOTIFICATION MSG_EOR
5053 	 *
5054 	 */
5055 	struct sctp_inpcb *inp = NULL;
5056 	int my_len = 0;
5057 	int cp_len = 0, error = 0;
5058 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5059 	struct mbuf *m = NULL;
5060 	struct sctp_tcb *stcb = NULL;
5061 	int wakeup_read_socket = 0;
5062 	int freecnt_applied = 0;
5063 	int out_flags = 0, in_flags = 0;
5064 	int block_allowed = 1;
5065 	uint32_t freed_so_far = 0;
5066 	uint32_t copied_so_far = 0;
5067 	int in_eeor_mode = 0;
5068 	int no_rcv_needed = 0;
5069 	uint32_t rwnd_req = 0;
5070 	int hold_sblock = 0;
5071 	int hold_rlock = 0;
5072 	int slen = 0;
5073 	uint32_t held_length = 0;
5074 	int sockbuf_lock = 0;
5075 
5076 	if (uio == NULL) {
5077 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5078 		return (EINVAL);
5079 	}
5080 	if (msg_flags) {
5081 		in_flags = *msg_flags;
5082 		if (in_flags & MSG_PEEK)
5083 			SCTP_STAT_INCR(sctps_read_peeks);
5084 	} else {
5085 		in_flags = 0;
5086 	}
5087 	slen = uio->uio_resid;
5088 
5089 	/* Pull in and set up our int flags */
5090 	if (in_flags & MSG_OOB) {
5091 		/* Out of band's NOT supported */
5092 		return (EOPNOTSUPP);
5093 	}
5094 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5095 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5096 		return (EINVAL);
5097 	}
5098 	if ((in_flags & (MSG_DONTWAIT
5099 	    | MSG_NBIO
5100 	    )) ||
5101 	    SCTP_SO_IS_NBIO(so)) {
5102 		block_allowed = 0;
5103 	}
5104 	/* setup the endpoint */
5105 	inp = (struct sctp_inpcb *)so->so_pcb;
5106 	if (inp == NULL) {
5107 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5108 		return (EFAULT);
5109 	}
5110 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5111 	/* Must be at least a MTU's worth */
5112 	if (rwnd_req < SCTP_MIN_RWND)
5113 		rwnd_req = SCTP_MIN_RWND;
5114 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5115 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5116 		sctp_misc_ints(SCTP_SORECV_ENTER,
5117 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5118 	}
5119 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5120 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5121 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5122 	}
5123 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5124 	sockbuf_lock = 1;
5125 	if (error) {
5126 		goto release_unlocked;
5127 	}
5128 restart:
5129 
5130 
5131 restart_nosblocks:
5132 	if (hold_sblock == 0) {
5133 		SOCKBUF_LOCK(&so->so_rcv);
5134 		hold_sblock = 1;
5135 	}
5136 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5137 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5138 		goto out;
5139 	}
5140 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5141 		if (so->so_error) {
5142 			error = so->so_error;
5143 			if ((in_flags & MSG_PEEK) == 0)
5144 				so->so_error = 0;
5145 			goto out;
5146 		} else {
5147 			if (so->so_rcv.sb_cc == 0) {
5148 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5149 				/* indicate EOF */
5150 				error = 0;
5151 				goto out;
5152 			}
5153 		}
5154 	}
5155 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5156 		/* we need to wait for data */
5157 		if ((so->so_rcv.sb_cc == 0) &&
5158 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5159 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5160 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5161 				/*
5162 				 * For active open side clear flags for
5163 				 * re-use passive open is blocked by
5164 				 * connect.
5165 				 */
5166 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5167 					/*
5168 					 * You were aborted, passive side
5169 					 * always hits here
5170 					 */
5171 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5172 					error = ECONNRESET;
5173 					/*
5174 					 * You get this once if you are
5175 					 * active open side
5176 					 */
5177 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5178 						/*
5179 						 * Remove flag if on the
5180 						 * active open side
5181 						 */
5182 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5183 					}
5184 				}
5185 				so->so_state &= ~(SS_ISCONNECTING |
5186 				    SS_ISDISCONNECTING |
5187 				    SS_ISCONFIRMING |
5188 				    SS_ISCONNECTED);
5189 				if (error == 0) {
5190 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5191 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5192 						error = ENOTCONN;
5193 					} else {
5194 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5195 					}
5196 				}
5197 				goto out;
5198 			}
5199 		}
5200 		error = sbwait(&so->so_rcv);
5201 		if (error) {
5202 			goto out;
5203 		}
5204 		held_length = 0;
5205 		goto restart_nosblocks;
5206 	} else if (so->so_rcv.sb_cc == 0) {
5207 		if (so->so_error) {
5208 			error = so->so_error;
5209 			if ((in_flags & MSG_PEEK) == 0)
5210 				so->so_error = 0;
5211 		} else {
5212 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5213 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5214 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5215 					/*
5216 					 * For active open side clear flags
5217 					 * for re-use passive open is
5218 					 * blocked by connect.
5219 					 */
5220 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5221 						/*
5222 						 * You were aborted, passive
5223 						 * side always hits here
5224 						 */
5225 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5226 						error = ECONNRESET;
5227 						/*
5228 						 * You get this once if you
5229 						 * are active open side
5230 						 */
5231 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5232 							/*
5233 							 * Remove flag if on
5234 							 * the active open
5235 							 * side
5236 							 */
5237 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5238 						}
5239 					}
5240 					so->so_state &= ~(SS_ISCONNECTING |
5241 					    SS_ISDISCONNECTING |
5242 					    SS_ISCONFIRMING |
5243 					    SS_ISCONNECTED);
5244 					if (error == 0) {
5245 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5246 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5247 							error = ENOTCONN;
5248 						} else {
5249 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5250 						}
5251 					}
5252 					goto out;
5253 				}
5254 			}
5255 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5256 			error = EWOULDBLOCK;
5257 		}
5258 		goto out;
5259 	}
5260 	if (hold_sblock == 1) {
5261 		SOCKBUF_UNLOCK(&so->so_rcv);
5262 		hold_sblock = 0;
5263 	}
5264 	/* we possibly have data we can read */
5265 	/* sa_ignore FREED_MEMORY */
5266 	control = TAILQ_FIRST(&inp->read_queue);
5267 	if (control == NULL) {
5268 		/*
5269 		 * This could be happening since the appender did the
5270 		 * increment but as not yet did the tailq insert onto the
5271 		 * read_queue
5272 		 */
5273 		if (hold_rlock == 0) {
5274 			SCTP_INP_READ_LOCK(inp);
5275 			hold_rlock = 1;
5276 		}
5277 		control = TAILQ_FIRST(&inp->read_queue);
5278 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5279 #ifdef INVARIANTS
5280 			panic("Huh, its non zero and nothing on control?");
5281 #endif
5282 			so->so_rcv.sb_cc = 0;
5283 		}
5284 		SCTP_INP_READ_UNLOCK(inp);
5285 		hold_rlock = 0;
5286 		goto restart;
5287 	}
5288 	if ((control->length == 0) &&
5289 	    (control->do_not_ref_stcb)) {
5290 		/*
5291 		 * Clean up code for freeing assoc that left behind a
5292 		 * pdapi.. maybe a peer in EEOR that just closed after
5293 		 * sending and never indicated a EOR.
5294 		 */
5295 		if (hold_rlock == 0) {
5296 			hold_rlock = 1;
5297 			SCTP_INP_READ_LOCK(inp);
5298 		}
5299 		control->held_length = 0;
5300 		if (control->data) {
5301 			/* Hmm there is data here .. fix */
5302 			struct mbuf *m_tmp;
5303 			int cnt = 0;
5304 
5305 			m_tmp = control->data;
5306 			while (m_tmp) {
5307 				cnt += SCTP_BUF_LEN(m_tmp);
5308 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5309 					control->tail_mbuf = m_tmp;
5310 					control->end_added = 1;
5311 				}
5312 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5313 			}
5314 			control->length = cnt;
5315 		} else {
5316 			/* remove it */
5317 			TAILQ_REMOVE(&inp->read_queue, control, next);
5318 			/* Add back any hiddend data */
5319 			sctp_free_remote_addr(control->whoFrom);
5320 			sctp_free_a_readq(stcb, control);
5321 		}
5322 		if (hold_rlock) {
5323 			hold_rlock = 0;
5324 			SCTP_INP_READ_UNLOCK(inp);
5325 		}
5326 		goto restart;
5327 	}
5328 	if ((control->length == 0) &&
5329 	    (control->end_added == 1)) {
5330 		/*
5331 		 * Do we also need to check for (control->pdapi_aborted ==
5332 		 * 1)?
5333 		 */
5334 		if (hold_rlock == 0) {
5335 			hold_rlock = 1;
5336 			SCTP_INP_READ_LOCK(inp);
5337 		}
5338 		TAILQ_REMOVE(&inp->read_queue, control, next);
5339 		if (control->data) {
5340 #ifdef INVARIANTS
5341 			panic("control->data not null but control->length == 0");
5342 #else
5343 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5344 			sctp_m_freem(control->data);
5345 			control->data = NULL;
5346 #endif
5347 		}
5348 		if (control->aux_data) {
5349 			sctp_m_free(control->aux_data);
5350 			control->aux_data = NULL;
5351 		}
5352 		sctp_free_remote_addr(control->whoFrom);
5353 		sctp_free_a_readq(stcb, control);
5354 		if (hold_rlock) {
5355 			hold_rlock = 0;
5356 			SCTP_INP_READ_UNLOCK(inp);
5357 		}
5358 		goto restart;
5359 	}
5360 	if (control->length == 0) {
5361 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5362 		    (filling_sinfo)) {
5363 			/* find a more suitable one then this */
5364 			ctl = TAILQ_NEXT(control, next);
5365 			while (ctl) {
5366 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5367 				    (ctl->some_taken ||
5368 				    (ctl->spec_flags & M_NOTIFICATION) ||
5369 				    ((ctl->do_not_ref_stcb == 0) &&
5370 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5371 				    ) {
5372 					/*-
5373 					 * If we have a different TCB next, and there is data
5374 					 * present. If we have already taken some (pdapi), OR we can
5375 					 * ref the tcb and no delivery as started on this stream, we
5376 					 * take it. Note we allow a notification on a different
5377 					 * assoc to be delivered..
5378 					 */
5379 					control = ctl;
5380 					goto found_one;
5381 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5382 					    (ctl->length) &&
5383 					    ((ctl->some_taken) ||
5384 					    ((ctl->do_not_ref_stcb == 0) &&
5385 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5386 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5387 					/*-
5388 					 * If we have the same tcb, and there is data present, and we
5389 					 * have the strm interleave feature present. Then if we have
5390 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5391 					 * not started a delivery for this stream, we can take it.
5392 					 * Note we do NOT allow a notificaiton on the same assoc to
5393 					 * be delivered.
5394 					 */
5395 					control = ctl;
5396 					goto found_one;
5397 				}
5398 				ctl = TAILQ_NEXT(ctl, next);
5399 			}
5400 		}
5401 		/*
5402 		 * if we reach here, not suitable replacement is available
5403 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5404 		 * into the our held count, and its time to sleep again.
5405 		 */
5406 		held_length = so->so_rcv.sb_cc;
5407 		control->held_length = so->so_rcv.sb_cc;
5408 		goto restart;
5409 	}
5410 	/* Clear the held length since there is something to read */
5411 	control->held_length = 0;
5412 	if (hold_rlock) {
5413 		SCTP_INP_READ_UNLOCK(inp);
5414 		hold_rlock = 0;
5415 	}
5416 found_one:
5417 	/*
5418 	 * If we reach here, control has a some data for us to read off.
5419 	 * Note that stcb COULD be NULL.
5420 	 */
5421 	control->some_taken++;
5422 	if (hold_sblock) {
5423 		SOCKBUF_UNLOCK(&so->so_rcv);
5424 		hold_sblock = 0;
5425 	}
5426 	stcb = control->stcb;
5427 	if (stcb) {
5428 		if ((control->do_not_ref_stcb == 0) &&
5429 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5430 			if (freecnt_applied == 0)
5431 				stcb = NULL;
5432 		} else if (control->do_not_ref_stcb == 0) {
5433 			/* you can't free it on me please */
5434 			/*
5435 			 * The lock on the socket buffer protects us so the
5436 			 * free code will stop. But since we used the
5437 			 * socketbuf lock and the sender uses the tcb_lock
5438 			 * to increment, we need to use the atomic add to
5439 			 * the refcnt
5440 			 */
5441 			if (freecnt_applied) {
5442 #ifdef INVARIANTS
5443 				panic("refcnt already incremented");
5444 #else
5445 				printf("refcnt already incremented?\n");
5446 #endif
5447 			} else {
5448 				atomic_add_int(&stcb->asoc.refcnt, 1);
5449 				freecnt_applied = 1;
5450 			}
5451 			/*
5452 			 * Setup to remember how much we have not yet told
5453 			 * the peer our rwnd has opened up. Note we grab the
5454 			 * value from the tcb from last time. Note too that
5455 			 * sack sending clears this when a sack is sent,
5456 			 * which is fine. Once we hit the rwnd_req, we then
5457 			 * will go to the sctp_user_rcvd() that will not
5458 			 * lock until it KNOWs it MUST send a WUP-SACK.
5459 			 */
5460 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5461 			stcb->freed_by_sorcv_sincelast = 0;
5462 		}
5463 	}
5464 	if (stcb &&
5465 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5466 	    control->do_not_ref_stcb == 0) {
5467 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5468 	}
5469 	/* First lets get off the sinfo and sockaddr info */
5470 	if ((sinfo) && filling_sinfo) {
5471 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5472 		nxt = TAILQ_NEXT(control, next);
5473 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5474 			struct sctp_extrcvinfo *s_extra;
5475 
5476 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5477 			if ((nxt) &&
5478 			    (nxt->length)) {
5479 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5480 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5481 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5482 				}
5483 				if (nxt->spec_flags & M_NOTIFICATION) {
5484 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5485 				}
5486 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5487 				s_extra->sreinfo_next_length = nxt->length;
5488 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5489 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5490 				if (nxt->tail_mbuf != NULL) {
5491 					if (nxt->end_added) {
5492 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5493 					}
5494 				}
5495 			} else {
5496 				/*
5497 				 * we explicitly 0 this, since the memcpy
5498 				 * got some other things beyond the older
5499 				 * sinfo_ that is on the control's structure
5500 				 * :-D
5501 				 */
5502 				nxt = NULL;
5503 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5504 				s_extra->sreinfo_next_aid = 0;
5505 				s_extra->sreinfo_next_length = 0;
5506 				s_extra->sreinfo_next_ppid = 0;
5507 				s_extra->sreinfo_next_stream = 0;
5508 			}
5509 		}
5510 		/*
5511 		 * update off the real current cum-ack, if we have an stcb.
5512 		 */
5513 		if ((control->do_not_ref_stcb == 0) && stcb)
5514 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5515 		/*
5516 		 * mask off the high bits, we keep the actual chunk bits in
5517 		 * there.
5518 		 */
5519 		sinfo->sinfo_flags &= 0x00ff;
5520 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5521 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5522 		}
5523 	}
5524 #ifdef SCTP_ASOCLOG_OF_TSNS
5525 	{
5526 		int index, newindex;
5527 		struct sctp_pcbtsn_rlog *entry;
5528 
5529 		do {
5530 			index = inp->readlog_index;
5531 			newindex = index + 1;
5532 			if (newindex >= SCTP_READ_LOG_SIZE) {
5533 				newindex = 0;
5534 			}
5535 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5536 		entry = &inp->readlog[index];
5537 		entry->vtag = control->sinfo_assoc_id;
5538 		entry->strm = control->sinfo_stream;
5539 		entry->seq = control->sinfo_ssn;
5540 		entry->sz = control->length;
5541 		entry->flgs = control->sinfo_flags;
5542 	}
5543 #endif
5544 	if (fromlen && from) {
5545 		struct sockaddr *to;
5546 
5547 #ifdef INET
5548 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5549 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5550 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5551 #else
5552 		/* No AF_INET use AF_INET6 */
5553 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5554 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5555 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5556 #endif
5557 
5558 		to = from;
5559 #if defined(INET) && defined(INET6)
5560 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5561 		    (to->sa_family == AF_INET) &&
5562 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5563 			struct sockaddr_in *sin;
5564 			struct sockaddr_in6 sin6;
5565 
5566 			sin = (struct sockaddr_in *)to;
5567 			bzero(&sin6, sizeof(sin6));
5568 			sin6.sin6_family = AF_INET6;
5569 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5570 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5571 			bcopy(&sin->sin_addr,
5572 			    &sin6.sin6_addr.s6_addr32[3],
5573 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5574 			sin6.sin6_port = sin->sin_port;
5575 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5576 		}
5577 #endif
5578 #if defined(INET6)
5579 		{
5580 			struct sockaddr_in6 lsa6, *to6;
5581 
5582 			to6 = (struct sockaddr_in6 *)to;
5583 			sctp_recover_scope_mac(to6, (&lsa6));
5584 		}
5585 #endif
5586 	}
5587 	/* now copy out what data we can */
5588 	if (mp == NULL) {
5589 		/* copy out each mbuf in the chain up to length */
5590 get_more_data:
5591 		m = control->data;
5592 		while (m) {
5593 			/* Move out all we can */
5594 			cp_len = (int)uio->uio_resid;
5595 			my_len = (int)SCTP_BUF_LEN(m);
5596 			if (cp_len > my_len) {
5597 				/* not enough in this buf */
5598 				cp_len = my_len;
5599 			}
5600 			if (hold_rlock) {
5601 				SCTP_INP_READ_UNLOCK(inp);
5602 				hold_rlock = 0;
5603 			}
5604 			if (cp_len > 0)
5605 				error = uiomove(mtod(m, char *), cp_len, uio);
5606 			/* re-read */
5607 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5608 				goto release;
5609 			}
5610 			if ((control->do_not_ref_stcb == 0) && stcb &&
5611 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5612 				no_rcv_needed = 1;
5613 			}
5614 			if (error) {
5615 				/* error we are out of here */
5616 				goto release;
5617 			}
5618 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5619 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5620 			    ((control->end_added == 0) ||
5621 			    (control->end_added &&
5622 			    (TAILQ_NEXT(control, next) == NULL)))
5623 			    ) {
5624 				SCTP_INP_READ_LOCK(inp);
5625 				hold_rlock = 1;
5626 			}
5627 			if (cp_len == SCTP_BUF_LEN(m)) {
5628 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5629 				    (control->end_added)) {
5630 					out_flags |= MSG_EOR;
5631 					if ((control->do_not_ref_stcb == 0) &&
5632 					    (control->stcb != NULL) &&
5633 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5634 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5635 				}
5636 				if (control->spec_flags & M_NOTIFICATION) {
5637 					out_flags |= MSG_NOTIFICATION;
5638 				}
5639 				/* we ate up the mbuf */
5640 				if (in_flags & MSG_PEEK) {
5641 					/* just looking */
5642 					m = SCTP_BUF_NEXT(m);
5643 					copied_so_far += cp_len;
5644 				} else {
5645 					/* dispose of the mbuf */
5646 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5647 						sctp_sblog(&so->so_rcv,
5648 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5649 					}
5650 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5651 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5652 						sctp_sblog(&so->so_rcv,
5653 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5654 					}
5655 					copied_so_far += cp_len;
5656 					freed_so_far += cp_len;
5657 					freed_so_far += MSIZE;
5658 					atomic_subtract_int(&control->length, cp_len);
5659 					control->data = sctp_m_free(m);
5660 					m = control->data;
5661 					/*
5662 					 * been through it all, must hold sb
5663 					 * lock ok to null tail
5664 					 */
5665 					if (control->data == NULL) {
5666 #ifdef INVARIANTS
5667 						if ((control->end_added == 0) ||
5668 						    (TAILQ_NEXT(control, next) == NULL)) {
5669 							/*
5670 							 * If the end is not
5671 							 * added, OR the
5672 							 * next is NOT null
5673 							 * we MUST have the
5674 							 * lock.
5675 							 */
5676 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5677 								panic("Hmm we don't own the lock?");
5678 							}
5679 						}
5680 #endif
5681 						control->tail_mbuf = NULL;
5682 #ifdef INVARIANTS
5683 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5684 							panic("end_added, nothing left and no MSG_EOR");
5685 						}
5686 #endif
5687 					}
5688 				}
5689 			} else {
5690 				/* Do we need to trim the mbuf? */
5691 				if (control->spec_flags & M_NOTIFICATION) {
5692 					out_flags |= MSG_NOTIFICATION;
5693 				}
5694 				if ((in_flags & MSG_PEEK) == 0) {
5695 					SCTP_BUF_RESV_UF(m, cp_len);
5696 					SCTP_BUF_LEN(m) -= cp_len;
5697 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5698 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5699 					}
5700 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5701 					if ((control->do_not_ref_stcb == 0) &&
5702 					    stcb) {
5703 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5704 					}
5705 					copied_so_far += cp_len;
5706 					freed_so_far += cp_len;
5707 					freed_so_far += MSIZE;
5708 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5709 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5710 						    SCTP_LOG_SBRESULT, 0);
5711 					}
5712 					atomic_subtract_int(&control->length, cp_len);
5713 				} else {
5714 					copied_so_far += cp_len;
5715 				}
5716 			}
5717 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5718 				break;
5719 			}
5720 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5721 			    (control->do_not_ref_stcb == 0) &&
5722 			    (freed_so_far >= rwnd_req)) {
5723 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5724 			}
5725 		}		/* end while(m) */
5726 		/*
5727 		 * At this point we have looked at it all and we either have
5728 		 * a MSG_EOR/or read all the user wants... <OR>
5729 		 * control->length == 0.
5730 		 */
5731 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5732 			/* we are done with this control */
5733 			if (control->length == 0) {
5734 				if (control->data) {
5735 #ifdef INVARIANTS
5736 					panic("control->data not null at read eor?");
5737 #else
5738 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5739 					sctp_m_freem(control->data);
5740 					control->data = NULL;
5741 #endif
5742 				}
5743 		done_with_control:
5744 				if (TAILQ_NEXT(control, next) == NULL) {
5745 					/*
5746 					 * If we don't have a next we need a
5747 					 * lock, if there is a next
5748 					 * interrupt is filling ahead of us
5749 					 * and we don't need a lock to
5750 					 * remove this guy (which is the
5751 					 * head of the queue).
5752 					 */
5753 					if (hold_rlock == 0) {
5754 						SCTP_INP_READ_LOCK(inp);
5755 						hold_rlock = 1;
5756 					}
5757 				}
5758 				TAILQ_REMOVE(&inp->read_queue, control, next);
5759 				/* Add back any hiddend data */
5760 				if (control->held_length) {
5761 					held_length = 0;
5762 					control->held_length = 0;
5763 					wakeup_read_socket = 1;
5764 				}
5765 				if (control->aux_data) {
5766 					sctp_m_free(control->aux_data);
5767 					control->aux_data = NULL;
5768 				}
5769 				no_rcv_needed = control->do_not_ref_stcb;
5770 				sctp_free_remote_addr(control->whoFrom);
5771 				control->data = NULL;
5772 				sctp_free_a_readq(stcb, control);
5773 				control = NULL;
5774 				if ((freed_so_far >= rwnd_req) &&
5775 				    (no_rcv_needed == 0))
5776 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5777 
5778 			} else {
5779 				/*
5780 				 * The user did not read all of this
5781 				 * message, turn off the returned MSG_EOR
5782 				 * since we are leaving more behind on the
5783 				 * control to read.
5784 				 */
5785 #ifdef INVARIANTS
5786 				if (control->end_added &&
5787 				    (control->data == NULL) &&
5788 				    (control->tail_mbuf == NULL)) {
5789 					panic("Gak, control->length is corrupt?");
5790 				}
5791 #endif
5792 				no_rcv_needed = control->do_not_ref_stcb;
5793 				out_flags &= ~MSG_EOR;
5794 			}
5795 		}
5796 		if (out_flags & MSG_EOR) {
5797 			goto release;
5798 		}
5799 		if ((uio->uio_resid == 0) ||
5800 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5801 		    ) {
5802 			goto release;
5803 		}
5804 		/*
5805 		 * If I hit here the receiver wants more and this message is
5806 		 * NOT done (pd-api). So two questions. Can we block? if not
5807 		 * we are done. Did the user NOT set MSG_WAITALL?
5808 		 */
5809 		if (block_allowed == 0) {
5810 			goto release;
5811 		}
5812 		/*
5813 		 * We need to wait for more data a few things: - We don't
5814 		 * sbunlock() so we don't get someone else reading. - We
5815 		 * must be sure to account for the case where what is added
5816 		 * is NOT to our control when we wakeup.
5817 		 */
5818 
5819 		/*
5820 		 * Do we need to tell the transport a rwnd update might be
5821 		 * needed before we go to sleep?
5822 		 */
5823 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5824 		    ((freed_so_far >= rwnd_req) &&
5825 		    (control->do_not_ref_stcb == 0) &&
5826 		    (no_rcv_needed == 0))) {
5827 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5828 		}
5829 wait_some_more:
5830 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5831 			goto release;
5832 		}
5833 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5834 			goto release;
5835 
5836 		if (hold_rlock == 1) {
5837 			SCTP_INP_READ_UNLOCK(inp);
5838 			hold_rlock = 0;
5839 		}
5840 		if (hold_sblock == 0) {
5841 			SOCKBUF_LOCK(&so->so_rcv);
5842 			hold_sblock = 1;
5843 		}
5844 		if ((copied_so_far) && (control->length == 0) &&
5845 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5846 			goto release;
5847 		}
5848 		if (so->so_rcv.sb_cc <= control->held_length) {
5849 			error = sbwait(&so->so_rcv);
5850 			if (error) {
5851 				goto release;
5852 			}
5853 			control->held_length = 0;
5854 		}
5855 		if (hold_sblock) {
5856 			SOCKBUF_UNLOCK(&so->so_rcv);
5857 			hold_sblock = 0;
5858 		}
5859 		if (control->length == 0) {
5860 			/* still nothing here */
5861 			if (control->end_added == 1) {
5862 				/* he aborted, or is done i.e.did a shutdown */
5863 				out_flags |= MSG_EOR;
5864 				if (control->pdapi_aborted) {
5865 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5866 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5867 
5868 					out_flags |= MSG_TRUNC;
5869 				} else {
5870 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5871 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5872 				}
5873 				goto done_with_control;
5874 			}
5875 			if (so->so_rcv.sb_cc > held_length) {
5876 				control->held_length = so->so_rcv.sb_cc;
5877 				held_length = 0;
5878 			}
5879 			goto wait_some_more;
5880 		} else if (control->data == NULL) {
5881 			/*
5882 			 * we must re-sync since data is probably being
5883 			 * added
5884 			 */
5885 			SCTP_INP_READ_LOCK(inp);
5886 			if ((control->length > 0) && (control->data == NULL)) {
5887 				/*
5888 				 * big trouble.. we have the lock and its
5889 				 * corrupt?
5890 				 */
5891 #ifdef INVARIANTS
5892 				panic("Impossible data==NULL length !=0");
5893 #endif
5894 				out_flags |= MSG_EOR;
5895 				out_flags |= MSG_TRUNC;
5896 				control->length = 0;
5897 				SCTP_INP_READ_UNLOCK(inp);
5898 				goto done_with_control;
5899 			}
5900 			SCTP_INP_READ_UNLOCK(inp);
5901 			/* We will fall around to get more data */
5902 		}
5903 		goto get_more_data;
5904 	} else {
5905 		/*-
5906 		 * Give caller back the mbuf chain,
5907 		 * store in uio_resid the length
5908 		 */
5909 		wakeup_read_socket = 0;
5910 		if ((control->end_added == 0) ||
5911 		    (TAILQ_NEXT(control, next) == NULL)) {
5912 			/* Need to get rlock */
5913 			if (hold_rlock == 0) {
5914 				SCTP_INP_READ_LOCK(inp);
5915 				hold_rlock = 1;
5916 			}
5917 		}
5918 		if (control->end_added) {
5919 			out_flags |= MSG_EOR;
5920 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5921 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5922 		}
5923 		if (control->spec_flags & M_NOTIFICATION) {
5924 			out_flags |= MSG_NOTIFICATION;
5925 		}
5926 		uio->uio_resid = control->length;
5927 		*mp = control->data;
5928 		m = control->data;
5929 		while (m) {
5930 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5931 				sctp_sblog(&so->so_rcv,
5932 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5933 			}
5934 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5935 			freed_so_far += SCTP_BUF_LEN(m);
5936 			freed_so_far += MSIZE;
5937 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5938 				sctp_sblog(&so->so_rcv,
5939 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5940 			}
5941 			m = SCTP_BUF_NEXT(m);
5942 		}
5943 		control->data = control->tail_mbuf = NULL;
5944 		control->length = 0;
5945 		if (out_flags & MSG_EOR) {
5946 			/* Done with this control */
5947 			goto done_with_control;
5948 		}
5949 	}
5950 release:
5951 	if (hold_rlock == 1) {
5952 		SCTP_INP_READ_UNLOCK(inp);
5953 		hold_rlock = 0;
5954 	}
5955 	if (hold_sblock == 1) {
5956 		SOCKBUF_UNLOCK(&so->so_rcv);
5957 		hold_sblock = 0;
5958 	}
5959 	sbunlock(&so->so_rcv);
5960 	sockbuf_lock = 0;
5961 
5962 release_unlocked:
5963 	if (hold_sblock) {
5964 		SOCKBUF_UNLOCK(&so->so_rcv);
5965 		hold_sblock = 0;
5966 	}
5967 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5968 		if ((freed_so_far >= rwnd_req) &&
5969 		    (control && (control->do_not_ref_stcb == 0)) &&
5970 		    (no_rcv_needed == 0))
5971 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5972 	}
5973 out:
5974 	if (msg_flags) {
5975 		*msg_flags = out_flags;
5976 	}
5977 	if (((out_flags & MSG_EOR) == 0) &&
5978 	    ((in_flags & MSG_PEEK) == 0) &&
5979 	    (sinfo) &&
5980 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
5981 		struct sctp_extrcvinfo *s_extra;
5982 
5983 		s_extra = (struct sctp_extrcvinfo *)sinfo;
5984 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5985 	}
5986 	if (hold_rlock == 1) {
5987 		SCTP_INP_READ_UNLOCK(inp);
5988 		hold_rlock = 0;
5989 	}
5990 	if (hold_sblock) {
5991 		SOCKBUF_UNLOCK(&so->so_rcv);
5992 		hold_sblock = 0;
5993 	}
5994 	if (sockbuf_lock) {
5995 		sbunlock(&so->so_rcv);
5996 	}
5997 	if (freecnt_applied) {
5998 		/*
5999 		 * The lock on the socket buffer protects us so the free
6000 		 * code will stop. But since we used the socketbuf lock and
6001 		 * the sender uses the tcb_lock to increment, we need to use
6002 		 * the atomic add to the refcnt.
6003 		 */
6004 		if (stcb == NULL) {
6005 #ifdef INVARIANTS
6006 			panic("stcb for refcnt has gone NULL?");
6007 			goto stage_left;
6008 #else
6009 			goto stage_left;
6010 #endif
6011 		}
6012 		atomic_add_int(&stcb->asoc.refcnt, -1);
6013 		freecnt_applied = 0;
6014 		/* Save the value back for next time */
6015 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6016 	}
6017 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6018 		if (stcb) {
6019 			sctp_misc_ints(SCTP_SORECV_DONE,
6020 			    freed_so_far,
6021 			    ((uio) ? (slen - uio->uio_resid) : slen),
6022 			    stcb->asoc.my_rwnd,
6023 			    so->so_rcv.sb_cc);
6024 		} else {
6025 			sctp_misc_ints(SCTP_SORECV_DONE,
6026 			    freed_so_far,
6027 			    ((uio) ? (slen - uio->uio_resid) : slen),
6028 			    0,
6029 			    so->so_rcv.sb_cc);
6030 		}
6031 	}
6032 stage_left:
6033 	if (wakeup_read_socket) {
6034 		sctp_sorwakeup(inp, so);
6035 	}
6036 	return (error);
6037 }
6038 
6039 
6040 #ifdef SCTP_MBUF_LOGGING
6041 struct mbuf *
6042 sctp_m_free(struct mbuf *m)
6043 {
6044 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6045 		if (SCTP_BUF_IS_EXTENDED(m)) {
6046 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6047 		}
6048 	}
6049 	return (m_free(m));
6050 }
6051 
6052 void
6053 sctp_m_freem(struct mbuf *mb)
6054 {
6055 	while (mb != NULL)
6056 		mb = sctp_m_free(mb);
6057 }
6058 
6059 #endif
6060 
6061 int
6062 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6063 {
6064 	/*
6065 	 * Given a local address. For all associations that holds the
6066 	 * address, request a peer-set-primary.
6067 	 */
6068 	struct sctp_ifa *ifa;
6069 	struct sctp_laddr *wi;
6070 
6071 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6072 	if (ifa == NULL) {
6073 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6074 		return (EADDRNOTAVAIL);
6075 	}
6076 	/*
6077 	 * Now that we have the ifa we must awaken the iterator with this
6078 	 * message.
6079 	 */
6080 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6081 	if (wi == NULL) {
6082 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6083 		return (ENOMEM);
6084 	}
6085 	/* Now incr the count and int wi structure */
6086 	SCTP_INCR_LADDR_COUNT();
6087 	bzero(wi, sizeof(*wi));
6088 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6089 	wi->ifa = ifa;
6090 	wi->action = SCTP_SET_PRIM_ADDR;
6091 	atomic_add_int(&ifa->refcount, 1);
6092 
6093 	/* Now add it to the work queue */
6094 	SCTP_WQ_ADDR_LOCK();
6095 	/*
6096 	 * Should this really be a tailq? As it is we will process the
6097 	 * newest first :-0
6098 	 */
6099 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6100 	SCTP_WQ_ADDR_UNLOCK();
6101 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6102 	    (struct sctp_inpcb *)NULL,
6103 	    (struct sctp_tcb *)NULL,
6104 	    (struct sctp_nets *)NULL);
6105 	return (0);
6106 }
6107 
6108 
6109 int
6110 sctp_soreceive(struct socket *so,
6111     struct sockaddr **psa,
6112     struct uio *uio,
6113     struct mbuf **mp0,
6114     struct mbuf **controlp,
6115     int *flagsp)
6116 {
6117 	int error, fromlen;
6118 	uint8_t sockbuf[256];
6119 	struct sockaddr *from;
6120 	struct sctp_extrcvinfo sinfo;
6121 	int filling_sinfo = 1;
6122 	struct sctp_inpcb *inp;
6123 
6124 	inp = (struct sctp_inpcb *)so->so_pcb;
6125 	/* pickup the assoc we are reading from */
6126 	if (inp == NULL) {
6127 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6128 		return (EINVAL);
6129 	}
6130 	if ((sctp_is_feature_off(inp,
6131 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6132 	    (controlp == NULL)) {
6133 		/* user does not want the sndrcv ctl */
6134 		filling_sinfo = 0;
6135 	}
6136 	if (psa) {
6137 		from = (struct sockaddr *)sockbuf;
6138 		fromlen = sizeof(sockbuf);
6139 		from->sa_len = 0;
6140 	} else {
6141 		from = NULL;
6142 		fromlen = 0;
6143 	}
6144 
6145 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6146 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6147 	if ((controlp) && (filling_sinfo)) {
6148 		/* copy back the sinfo in a CMSG format */
6149 		if (filling_sinfo)
6150 			*controlp = sctp_build_ctl_nchunk(inp,
6151 			    (struct sctp_sndrcvinfo *)&sinfo);
6152 		else
6153 			*controlp = NULL;
6154 	}
6155 	if (psa) {
6156 		/* copy back the address info */
6157 		if (from && from->sa_len) {
6158 			*psa = sodupsockaddr(from, M_NOWAIT);
6159 		} else {
6160 			*psa = NULL;
6161 		}
6162 	}
6163 	return (error);
6164 }
6165 
6166 
6167 int
6168 sctp_l_soreceive(struct socket *so,
6169     struct sockaddr **name,
6170     struct uio *uio,
6171     char **controlp,
6172     int *controllen,
6173     int *flag)
6174 {
6175 	int error, fromlen;
6176 	uint8_t sockbuf[256];
6177 	struct sockaddr *from;
6178 	struct sctp_extrcvinfo sinfo;
6179 	int filling_sinfo = 1;
6180 	struct sctp_inpcb *inp;
6181 
6182 	inp = (struct sctp_inpcb *)so->so_pcb;
6183 	/* pickup the assoc we are reading from */
6184 	if (inp == NULL) {
6185 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6186 		return (EINVAL);
6187 	}
6188 	if ((sctp_is_feature_off(inp,
6189 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6190 	    (controlp == NULL)) {
6191 		/* user does not want the sndrcv ctl */
6192 		filling_sinfo = 0;
6193 	}
6194 	if (name) {
6195 		from = (struct sockaddr *)sockbuf;
6196 		fromlen = sizeof(sockbuf);
6197 		from->sa_len = 0;
6198 	} else {
6199 		from = NULL;
6200 		fromlen = 0;
6201 	}
6202 
6203 	error = sctp_sorecvmsg(so, uio,
6204 	    (struct mbuf **)NULL,
6205 	    from, fromlen, flag,
6206 	    (struct sctp_sndrcvinfo *)&sinfo,
6207 	    filling_sinfo);
6208 	if ((controlp) && (filling_sinfo)) {
6209 		/*
6210 		 * copy back the sinfo in a CMSG format note that the caller
6211 		 * has reponsibility for freeing the memory.
6212 		 */
6213 		if (filling_sinfo)
6214 			*controlp = sctp_build_ctl_cchunk(inp,
6215 			    controllen,
6216 			    (struct sctp_sndrcvinfo *)&sinfo);
6217 	}
6218 	if (name) {
6219 		/* copy back the address info */
6220 		if (from && from->sa_len) {
6221 			*name = sodupsockaddr(from, M_WAIT);
6222 		} else {
6223 			*name = NULL;
6224 		}
6225 	}
6226 	return (error);
6227 }
6228 
6229 
6230 
6231 
6232 
6233 
6234 
6235 int
6236 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6237     int totaddr, int *error)
6238 {
6239 	int added = 0;
6240 	int i;
6241 	struct sctp_inpcb *inp;
6242 	struct sockaddr *sa;
6243 	size_t incr = 0;
6244 
6245 	sa = addr;
6246 	inp = stcb->sctp_ep;
6247 	*error = 0;
6248 	for (i = 0; i < totaddr; i++) {
6249 		if (sa->sa_family == AF_INET) {
6250 			incr = sizeof(struct sockaddr_in);
6251 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6252 				/* assoc gone no un-lock */
6253 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6254 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
6255 				*error = ENOBUFS;
6256 				goto out_now;
6257 			}
6258 			added++;
6259 		} else if (sa->sa_family == AF_INET6) {
6260 			incr = sizeof(struct sockaddr_in6);
6261 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6262 				/* assoc gone no un-lock */
6263 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6264 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
6265 				*error = ENOBUFS;
6266 				goto out_now;
6267 			}
6268 			added++;
6269 		}
6270 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6271 	}
6272 out_now:
6273 	return (added);
6274 }
6275 
6276 struct sctp_tcb *
6277 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6278     int *totaddr, int *num_v4, int *num_v6, int *error,
6279     int limit, int *bad_addr)
6280 {
6281 	struct sockaddr *sa;
6282 	struct sctp_tcb *stcb = NULL;
6283 	size_t incr, at, i;
6284 
6285 	at = incr = 0;
6286 	sa = addr;
6287 	*error = *num_v6 = *num_v4 = 0;
6288 	/* account and validate addresses */
6289 	for (i = 0; i < (size_t)*totaddr; i++) {
6290 		if (sa->sa_family == AF_INET) {
6291 			(*num_v4) += 1;
6292 			incr = sizeof(struct sockaddr_in);
6293 			if (sa->sa_len != incr) {
6294 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6295 				*error = EINVAL;
6296 				*bad_addr = 1;
6297 				return (NULL);
6298 			}
6299 		} else if (sa->sa_family == AF_INET6) {
6300 			struct sockaddr_in6 *sin6;
6301 
6302 			sin6 = (struct sockaddr_in6 *)sa;
6303 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6304 				/* Must be non-mapped for connectx */
6305 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6306 				*error = EINVAL;
6307 				*bad_addr = 1;
6308 				return (NULL);
6309 			}
6310 			(*num_v6) += 1;
6311 			incr = sizeof(struct sockaddr_in6);
6312 			if (sa->sa_len != incr) {
6313 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6314 				*error = EINVAL;
6315 				*bad_addr = 1;
6316 				return (NULL);
6317 			}
6318 		} else {
6319 			*totaddr = i;
6320 			/* we are done */
6321 			break;
6322 		}
6323 		SCTP_INP_INCR_REF(inp);
6324 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6325 		if (stcb != NULL) {
6326 			/* Already have or am bring up an association */
6327 			return (stcb);
6328 		} else {
6329 			SCTP_INP_DECR_REF(inp);
6330 		}
6331 		if ((at + incr) > (size_t)limit) {
6332 			*totaddr = i;
6333 			break;
6334 		}
6335 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6336 	}
6337 	return ((struct sctp_tcb *)NULL);
6338 }
6339 
6340 /*
6341  * sctp_bindx(ADD) for one address.
6342  * assumes all arguments are valid/checked by caller.
6343  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * Bind one additional address to a subset-bound endpoint
	 * (sctp_bindx(SCTP_BINDX_ADD_ADDR)).  On failure *error is set to
	 * an errno value; on success it is left untouched (callers must
	 * pre-clear it).  'p' is the calling thread/proc, needed only for
	 * the initial bind of an unbound endpoint.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/*
			 * Rewrite a v4-mapped v6 address as a plain v4
			 * address in the stack-local 'sin' before binding.
			 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not yet bound at all: this becomes the first bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* Is some other endpoint already bound to this addr/port? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* Clear the port before adding; mgmt works on the addr. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6465 
6466 /*
6467  * sctp_bindx(DELETE) for one address.
6468  * assumes all arguments are valid/checked by caller.
6469  */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * Remove one address from a subset-bound endpoint
	 * (sctp_bindx(SCTP_BINDX_REM_ADDR)).  On failure *error is set to
	 * an errno value; on success it is left untouched (callers must
	 * pre-clear it).  Validation mirrors sctp_bindx_add_address().
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/*
			 * Rewrite a v4-mapped v6 address as a plain v4
			 * address in the stack-local 'sin' before deleting.
			 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6548 
6549 /*
6550  * returns the valid local address count for an assoc, taking into account
6551  * all scoping rules
6552  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the local addresses valid for this association, honoring
	 * the association's scoping rules (loopback/private/link-local/
	 * site-local) and the endpoint's v4/v6 bind restrictions.
	 */
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/*
	 * Which families may this endpoint use?  A v6 socket may also use
	 * v4 unless it is V6ONLY; a non-v6 socket is v4 only.
	 */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6684 
6685 #if defined(SCTP_LOCAL_TRACE_BUF)
6686 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Append one entry to the global lock-free trace ring: reserve a
	 * slot index with a CAS loop, then fill in the entry fields.
	 * 'str' is unused here (format string kept for the API).
	 */
	uint32_t saveindex, newindex;

	/*
	 * Claim the current index and advance it atomically; retry if
	 * another CPU won the race.
	 */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* On wrap, the out-of-range claimed index maps to slot 0. */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	/*
	 * NOTE(review): the entry write is not atomic with the index
	 * reservation, so a concurrently-read entry may be torn — the
	 * trace buffer is best-effort by design; confirm.
	 */
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6712 
6713 #endif
/* We will need to add support
 * for binding the ports and such here
 * so that we can do UDP tunneling. In
 * the meantime, we return an error.
 */
6719 #include <netinet/udp.h>
6720 #include <netinet/udp_var.h>
6721 #include <sys/proc.h>
6722 #ifdef INET6
6723 #include <netinet6/sctp6_var.h>
6724 #endif
6725 
6726 static void
6727 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6728 {
6729 	struct ip *iph;
6730 	struct mbuf *sp, *last;
6731 	struct udphdr *uhdr;
6732 	uint16_t port = 0, len;
6733 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6734 
6735 	/*
6736 	 * Split out the mbuf chain. Leave the IP header in m, place the
6737 	 * rest in the sp.
6738 	 */
6739 	if ((m->m_flags & M_PKTHDR) == 0) {
6740 		/* Can't handle one that is not a pkt hdr */
6741 		goto out;
6742 	}
6743 	/* pull the src port */
6744 	iph = mtod(m, struct ip *);
6745 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6746 
6747 	port = uhdr->uh_sport;
6748 	sp = m_split(m, off, M_DONTWAIT);
6749 	if (sp == NULL) {
6750 		/* Gak, drop packet, we can't do a split */
6751 		goto out;
6752 	}
6753 	if (sp->m_pkthdr.len < header_size) {
6754 		/* Gak, packet can't have an SCTP header in it - to small */
6755 		m_freem(sp);
6756 		goto out;
6757 	}
6758 	/* ok now pull up the UDP header and SCTP header together */
6759 	sp = m_pullup(sp, header_size);
6760 	if (sp == NULL) {
6761 		/* Gak pullup failed */
6762 		goto out;
6763 	}
6764 	/* trim out the UDP header */
6765 	m_adj(sp, sizeof(struct udphdr));
6766 
6767 	/* Now reconstruct the mbuf chain */
6768 	/* 1) find last one */
6769 	last = m;
6770 	while (last->m_next != NULL) {
6771 		last = last->m_next;
6772 	}
6773 	last->m_next = sp;
6774 	m->m_pkthdr.len += sp->m_pkthdr.len;
6775 	last = m;
6776 	while (last != NULL) {
6777 		last = last->m_next;
6778 	}
6779 	/* Now its ready for sctp_input or sctp6_input */
6780 	iph = mtod(m, struct ip *);
6781 	switch (iph->ip_v) {
6782 	case IPVERSION:
6783 		{
6784 			/* its IPv4 */
6785 			len = SCTP_GET_IPV4_LENGTH(iph);
6786 			len -= sizeof(struct udphdr);
6787 			SCTP_GET_IPV4_LENGTH(iph) = len;
6788 			sctp_input_with_port(m, off, port);
6789 			break;
6790 		}
6791 #ifdef INET6
6792 	case IPV6_VERSION >> 4:
6793 		{
6794 			/* its IPv6 - NOT supported */
6795 			goto out;
6796 			break;
6797 
6798 		}
6799 #endif
6800 	default:
6801 		{
6802 			m_freem(m);
6803 			break;
6804 		}
6805 	}
6806 	return;
6807 out:
6808 	m_freem(m);
6809 }
6810 
6811 void
6812 sctp_over_udp_stop(void)
6813 {
6814 	struct socket *sop;
6815 
6816 	/*
6817 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6818 	 * for writting!
6819 	 */
6820 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6821 		/* Nothing to do */
6822 		return;
6823 	}
6824 	sop = SCTP_BASE_INFO(udp_tun_socket);
6825 	soclose(sop);
6826 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6827 }
int
sctp_over_udp_start(void)
{
	/*
	 * Start UDP tunneling: create a kernel UDP socket, register
	 * sctp_recv_udp_tunneled_packet() as its tunneling input hook, and
	 * bind it to the configured tunneling port.  Returns 0 or an errno
	 * value; on any failure after socket creation the socket is torn
	 * down again via sctp_over_udp_stop().
	 */
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up, we can't get the port */
exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_udp_tunneled_packet().
	 */
	return (0);
}
6881