xref: /freebsd/sys/netinet/sctputil.c (revision 5dcd9c10612684d1c823670cbb5b4715028784e7)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *   this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *   the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
34 
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37 
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctputil.h>
41 #include <netinet/sctp_var.h>
42 #include <netinet/sctp_sysctl.h>
43 #ifdef INET6
44 #endif
45 #include <netinet/sctp_header.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_uio.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
50 #include <netinet/sctp_auth.h>
51 #include <netinet/sctp_asconf.h>
52 #include <netinet/sctp_bsd_addr.h>
53 
54 
55 #ifndef KTR_SCTP
56 #define KTR_SCTP KTR_SUBSYS
57 #endif
58 
59 extern struct sctp_cc_functions sctp_cc_functions[];
60 extern struct sctp_ss_functions sctp_ss_functions[];
61 
62 void
63 sctp_sblog(struct sockbuf *sb,
64     struct sctp_tcb *stcb, int from, int incr)
65 {
66 	struct sctp_cwnd_log sctp_clog;
67 
68 	sctp_clog.x.sb.stcb = stcb;
69 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
70 	if (stcb)
71 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
72 	else
73 		sctp_clog.x.sb.stcb_sbcc = 0;
74 	sctp_clog.x.sb.incr = incr;
75 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
76 	    SCTP_LOG_EVENT_SB,
77 	    from,
78 	    sctp_clog.x.misc.log1,
79 	    sctp_clog.x.misc.log2,
80 	    sctp_clog.x.misc.log3,
81 	    sctp_clog.x.misc.log4);
82 }
83 
84 void
85 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
86 {
87 	struct sctp_cwnd_log sctp_clog;
88 
89 	sctp_clog.x.close.inp = (void *)inp;
90 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
91 	if (stcb) {
92 		sctp_clog.x.close.stcb = (void *)stcb;
93 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
94 	} else {
95 		sctp_clog.x.close.stcb = 0;
96 		sctp_clog.x.close.state = 0;
97 	}
98 	sctp_clog.x.close.loc = loc;
99 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
100 	    SCTP_LOG_EVENT_CLOSE,
101 	    0,
102 	    sctp_clog.x.misc.log1,
103 	    sctp_clog.x.misc.log2,
104 	    sctp_clog.x.misc.log3,
105 	    sctp_clog.x.misc.log4);
106 }
107 
108 
109 void
110 rto_logging(struct sctp_nets *net, int from)
111 {
112 	struct sctp_cwnd_log sctp_clog;
113 
114 	memset(&sctp_clog, 0, sizeof(sctp_clog));
115 	sctp_clog.x.rto.net = (void *)net;
116 	sctp_clog.x.rto.rtt = net->rtt / 1000;
117 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
118 	    SCTP_LOG_EVENT_RTT,
119 	    from,
120 	    sctp_clog.x.misc.log1,
121 	    sctp_clog.x.misc.log2,
122 	    sctp_clog.x.misc.log3,
123 	    sctp_clog.x.misc.log4);
124 
125 }
126 
127 void
128 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
129 {
130 	struct sctp_cwnd_log sctp_clog;
131 
132 	sctp_clog.x.strlog.stcb = stcb;
133 	sctp_clog.x.strlog.n_tsn = tsn;
134 	sctp_clog.x.strlog.n_sseq = sseq;
135 	sctp_clog.x.strlog.e_tsn = 0;
136 	sctp_clog.x.strlog.e_sseq = 0;
137 	sctp_clog.x.strlog.strm = stream;
138 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
139 	    SCTP_LOG_EVENT_STRM,
140 	    from,
141 	    sctp_clog.x.misc.log1,
142 	    sctp_clog.x.misc.log2,
143 	    sctp_clog.x.misc.log3,
144 	    sctp_clog.x.misc.log4);
145 
146 }
147 
148 void
149 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
150 {
151 	struct sctp_cwnd_log sctp_clog;
152 
153 	sctp_clog.x.nagle.stcb = (void *)stcb;
154 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
155 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
156 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
157 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
158 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
159 	    SCTP_LOG_EVENT_NAGLE,
160 	    action,
161 	    sctp_clog.x.misc.log1,
162 	    sctp_clog.x.misc.log2,
163 	    sctp_clog.x.misc.log3,
164 	    sctp_clog.x.misc.log4);
165 }
166 
167 
168 void
169 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
170 {
171 	struct sctp_cwnd_log sctp_clog;
172 
173 	sctp_clog.x.sack.cumack = cumack;
174 	sctp_clog.x.sack.oldcumack = old_cumack;
175 	sctp_clog.x.sack.tsn = tsn;
176 	sctp_clog.x.sack.numGaps = gaps;
177 	sctp_clog.x.sack.numDups = dups;
178 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
179 	    SCTP_LOG_EVENT_SACK,
180 	    from,
181 	    sctp_clog.x.misc.log1,
182 	    sctp_clog.x.misc.log2,
183 	    sctp_clog.x.misc.log3,
184 	    sctp_clog.x.misc.log4);
185 }
186 
187 void
188 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
189 {
190 	struct sctp_cwnd_log sctp_clog;
191 
192 	memset(&sctp_clog, 0, sizeof(sctp_clog));
193 	sctp_clog.x.map.base = map;
194 	sctp_clog.x.map.cum = cum;
195 	sctp_clog.x.map.high = high;
196 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
197 	    SCTP_LOG_EVENT_MAP,
198 	    from,
199 	    sctp_clog.x.misc.log1,
200 	    sctp_clog.x.misc.log2,
201 	    sctp_clog.x.misc.log3,
202 	    sctp_clog.x.misc.log4);
203 }
204 
205 void
206 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
207     int from)
208 {
209 	struct sctp_cwnd_log sctp_clog;
210 
211 	memset(&sctp_clog, 0, sizeof(sctp_clog));
212 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
213 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
214 	sctp_clog.x.fr.tsn = tsn;
215 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
216 	    SCTP_LOG_EVENT_FR,
217 	    from,
218 	    sctp_clog.x.misc.log1,
219 	    sctp_clog.x.misc.log2,
220 	    sctp_clog.x.misc.log3,
221 	    sctp_clog.x.misc.log4);
222 
223 }
224 
225 
226 void
227 sctp_log_mb(struct mbuf *m, int from)
228 {
229 	struct sctp_cwnd_log sctp_clog;
230 
231 	sctp_clog.x.mb.mp = m;
232 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
233 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
234 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
235 	if (SCTP_BUF_IS_EXTENDED(m)) {
236 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
237 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
238 	} else {
239 		sctp_clog.x.mb.ext = 0;
240 		sctp_clog.x.mb.refcnt = 0;
241 	}
242 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
243 	    SCTP_LOG_EVENT_MBUF,
244 	    from,
245 	    sctp_clog.x.misc.log1,
246 	    sctp_clog.x.misc.log2,
247 	    sctp_clog.x.misc.log3,
248 	    sctp_clog.x.misc.log4);
249 }
250 
251 
252 void
253 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
254     int from)
255 {
256 	struct sctp_cwnd_log sctp_clog;
257 
258 	if (control == NULL) {
259 		SCTP_PRINTF("Gak log of NULL?\n");
260 		return;
261 	}
262 	sctp_clog.x.strlog.stcb = control->stcb;
263 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
264 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
265 	sctp_clog.x.strlog.strm = control->sinfo_stream;
266 	if (poschk != NULL) {
267 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
268 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
269 	} else {
270 		sctp_clog.x.strlog.e_tsn = 0;
271 		sctp_clog.x.strlog.e_sseq = 0;
272 	}
273 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
274 	    SCTP_LOG_EVENT_STRM,
275 	    from,
276 	    sctp_clog.x.misc.log1,
277 	    sctp_clog.x.misc.log2,
278 	    sctp_clog.x.misc.log3,
279 	    sctp_clog.x.misc.log4);
280 
281 }
282 
283 void
284 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
285 {
286 	struct sctp_cwnd_log sctp_clog;
287 
288 	sctp_clog.x.cwnd.net = net;
289 	if (stcb->asoc.send_queue_cnt > 255)
290 		sctp_clog.x.cwnd.cnt_in_send = 255;
291 	else
292 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
293 	if (stcb->asoc.stream_queue_cnt > 255)
294 		sctp_clog.x.cwnd.cnt_in_str = 255;
295 	else
296 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
297 
298 	if (net) {
299 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
300 		sctp_clog.x.cwnd.inflight = net->flight_size;
301 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
302 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
303 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
304 	}
305 	if (SCTP_CWNDLOG_PRESEND == from) {
306 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
307 	}
308 	sctp_clog.x.cwnd.cwnd_augment = augment;
309 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
310 	    SCTP_LOG_EVENT_CWND,
311 	    from,
312 	    sctp_clog.x.misc.log1,
313 	    sctp_clog.x.misc.log2,
314 	    sctp_clog.x.misc.log3,
315 	    sctp_clog.x.misc.log4);
316 
317 }
318 
/*
 * Snapshot the ownership state of every SCTP-related lock (tcb, inp,
 * create, global info, socket send/receive buffer) into one KTR record.
 * Any of inp/stcb may be NULL; unknown locks log SCTP_LOCK_UNKNOWN.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock samples so_rcv.sb_mtx, the same
		 * mutex as sockrcvbuf_lock — confirm this is intentional
		 * rather than a copy/paste of the rcv-buffer lock.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
363 
364 void
365 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
366 {
367 	struct sctp_cwnd_log sctp_clog;
368 
369 	memset(&sctp_clog, 0, sizeof(sctp_clog));
370 	sctp_clog.x.cwnd.net = net;
371 	sctp_clog.x.cwnd.cwnd_new_value = error;
372 	sctp_clog.x.cwnd.inflight = net->flight_size;
373 	sctp_clog.x.cwnd.cwnd_augment = burst;
374 	if (stcb->asoc.send_queue_cnt > 255)
375 		sctp_clog.x.cwnd.cnt_in_send = 255;
376 	else
377 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
378 	if (stcb->asoc.stream_queue_cnt > 255)
379 		sctp_clog.x.cwnd.cnt_in_str = 255;
380 	else
381 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
382 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
383 	    SCTP_LOG_EVENT_MAXBURST,
384 	    from,
385 	    sctp_clog.x.misc.log1,
386 	    sctp_clog.x.misc.log2,
387 	    sctp_clog.x.misc.log3,
388 	    sctp_clog.x.misc.log4);
389 
390 }
391 
392 void
393 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
394 {
395 	struct sctp_cwnd_log sctp_clog;
396 
397 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
398 	sctp_clog.x.rwnd.send_size = snd_size;
399 	sctp_clog.x.rwnd.overhead = overhead;
400 	sctp_clog.x.rwnd.new_rwnd = 0;
401 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
402 	    SCTP_LOG_EVENT_RWND,
403 	    from,
404 	    sctp_clog.x.misc.log1,
405 	    sctp_clog.x.misc.log2,
406 	    sctp_clog.x.misc.log3,
407 	    sctp_clog.x.misc.log4);
408 }
409 
410 void
411 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
412 {
413 	struct sctp_cwnd_log sctp_clog;
414 
415 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
416 	sctp_clog.x.rwnd.send_size = flight_size;
417 	sctp_clog.x.rwnd.overhead = overhead;
418 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
419 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
420 	    SCTP_LOG_EVENT_RWND,
421 	    from,
422 	    sctp_clog.x.misc.log1,
423 	    sctp_clog.x.misc.log2,
424 	    sctp_clog.x.misc.log3,
425 	    sctp_clog.x.misc.log4);
426 }
427 
428 void
429 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
430 {
431 	struct sctp_cwnd_log sctp_clog;
432 
433 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
434 	sctp_clog.x.mbcnt.size_change = book;
435 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
436 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
437 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
438 	    SCTP_LOG_EVENT_MBCNT,
439 	    from,
440 	    sctp_clog.x.misc.log1,
441 	    sctp_clog.x.misc.log2,
442 	    sctp_clog.x.misc.log3,
443 	    sctp_clog.x.misc.log4);
444 
445 }
446 
/*
 * Emit a generic KTR trace record carrying four caller-supplied words;
 * from identifies the call site / event meaning of a..d.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
455 
456 void
457 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
458 {
459 	struct sctp_cwnd_log sctp_clog;
460 
461 	sctp_clog.x.wake.stcb = (void *)stcb;
462 	sctp_clog.x.wake.wake_cnt = wake_cnt;
463 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
464 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
465 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
466 
467 	if (stcb->asoc.stream_queue_cnt < 0xff)
468 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
469 	else
470 		sctp_clog.x.wake.stream_qcnt = 0xff;
471 
472 	if (stcb->asoc.chunks_on_out_queue < 0xff)
473 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
474 	else
475 		sctp_clog.x.wake.chunks_on_oque = 0xff;
476 
477 	sctp_clog.x.wake.sctpflags = 0;
478 	/* set in the defered mode stuff */
479 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
480 		sctp_clog.x.wake.sctpflags |= 1;
481 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
482 		sctp_clog.x.wake.sctpflags |= 2;
483 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
484 		sctp_clog.x.wake.sctpflags |= 4;
485 	/* what about the sb */
486 	if (stcb->sctp_socket) {
487 		struct socket *so = stcb->sctp_socket;
488 
489 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
490 	} else {
491 		sctp_clog.x.wake.sbflags = 0xff;
492 	}
493 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
494 	    SCTP_LOG_EVENT_WAKE,
495 	    from,
496 	    sctp_clog.x.misc.log1,
497 	    sctp_clog.x.misc.log2,
498 	    sctp_clog.x.misc.log3,
499 	    sctp_clog.x.misc.log4);
500 
501 }
502 
503 void
504 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
505 {
506 	struct sctp_cwnd_log sctp_clog;
507 
508 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
509 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
510 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
511 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
512 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
513 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
514 	sctp_clog.x.blk.sndlen = sendlen;
515 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
516 	    SCTP_LOG_EVENT_BLOCK,
517 	    from,
518 	    sctp_clog.x.misc.log1,
519 	    sctp_clog.x.misc.log2,
520 	    sctp_clog.x.misc.log3,
521 	    sctp_clog.x.misc.log4);
522 
523 }
524 
/*
 * Fill a user-supplied stat-log buffer.  Currently a no-op that always
 * reports success; per the note below, KTR/ktrdump is relied on instead.
 * Neither optval nor optsize is touched.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
531 
532 #ifdef SCTP_AUDITING_ENABLED
533 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
534 static int sctp_audit_indx = 0;
535 
536 static
537 void
538 sctp_print_audit_report(void)
539 {
540 	int i;
541 	int cnt;
542 
543 	cnt = 0;
544 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
545 		if ((sctp_audit_data[i][0] == 0xe0) &&
546 		    (sctp_audit_data[i][1] == 0x01)) {
547 			cnt = 0;
548 			SCTP_PRINTF("\n");
549 		} else if (sctp_audit_data[i][0] == 0xf0) {
550 			cnt = 0;
551 			SCTP_PRINTF("\n");
552 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
553 		    (sctp_audit_data[i][1] == 0x01)) {
554 			SCTP_PRINTF("\n");
555 			cnt = 0;
556 		}
557 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
558 		    (uint32_t) sctp_audit_data[i][1]);
559 		cnt++;
560 		if ((cnt % 14) == 0)
561 			SCTP_PRINTF("\n");
562 	}
563 	for (i = 0; i < sctp_audit_indx; i++) {
564 		if ((sctp_audit_data[i][0] == 0xe0) &&
565 		    (sctp_audit_data[i][1] == 0x01)) {
566 			cnt = 0;
567 			SCTP_PRINTF("\n");
568 		} else if (sctp_audit_data[i][0] == 0xf0) {
569 			cnt = 0;
570 			SCTP_PRINTF("\n");
571 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
572 		    (sctp_audit_data[i][1] == 0x01)) {
573 			SCTP_PRINTF("\n");
574 			cnt = 0;
575 		}
576 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
577 		    (uint32_t) sctp_audit_data[i][1]);
578 		cnt++;
579 		if ((cnt % 14) == 0)
580 			SCTP_PRINTF("\n");
581 	}
582 	SCTP_PRINTF("\n");
583 }
584 
/*
 * Audit the association's bookkeeping against reality: recompute the
 * retransmit count, total flight and flight count from the sent queue,
 * and each net's flight size from its chunks.  Any mismatch is recorded
 * in the audit buffer, printed, and then corrected in place.  A report
 * is dumped via sctp_print_audit_report() if anything was wrong.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Record the audit entry point (0xAA) and the caller's code. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: no endpoint — nothing further to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: no association — nothing further to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: snapshot of the current retransmit count (low byte). */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount retransmits and in-flight bytes from the sent queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit count mismatch — fix it up. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total flight mismatch — fix it up. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: flight chunk count mismatch — fix it up. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: sum of per-net flight sizes vs. the asoc total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight sizes disagree with the total. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
714 
715 void
716 sctp_audit_log(uint8_t ev, uint8_t fd)
717 {
718 
719 	sctp_audit_data[sctp_audit_indx][0] = ev;
720 	sctp_audit_data[sctp_audit_indx][1] = fd;
721 	sctp_audit_indx++;
722 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
723 		sctp_audit_indx = 0;
724 	}
725 }
726 
727 #endif
728 
729 /*
730  * sctp_stop_timers_for_shutdown() should be called
731  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
732  * state to make sure that all timers are stopped.
733  */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	/* Association-wide timers first... */
	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	/* ...then the per-destination (per-net) timers. */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
	}
}
753 
754 /*
755  * a list of sizes based on typical mtu's, used only if next hop size not
756  * returned.
757  */
/*
 * NB: sctp_get_prev_mtu() and sctp_get_next_mtu() below depend on this
 * table being sorted in strictly ascending order.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
778 
779 /*
780  * Return the largest MTU smaller than val. If there is no
781  * entry, just return val.
782  */
783 uint32_t
784 sctp_get_prev_mtu(uint32_t val)
785 {
786 	uint32_t i;
787 
788 	if (val <= sctp_mtu_sizes[0]) {
789 		return (val);
790 	}
791 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
792 		if (val <= sctp_mtu_sizes[i]) {
793 			break;
794 		}
795 	}
796 	return (sctp_mtu_sizes[i - 1]);
797 }
798 
799 /*
800  * Return the smallest MTU larger than val. If there is no
801  * entry, just return val.
802  */
803 uint32_t
804 sctp_get_next_mtu(struct sctp_inpcb *inp, uint32_t val)
805 {
806 	/* select another MTU that is just bigger than this one */
807 	uint32_t i;
808 
809 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
810 		if (val < sctp_mtu_sizes[i]) {
811 			return (sctp_mtu_sizes[i]);
812 		}
813 	}
814 	return (val);
815 }
816 
/* Refill the endpoint's random_store pool and reset its read cursor. */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* Bump the counter so the next refill hashes different input. */
	m->random_counter++;
}
835 
/*
 * Draw a 32-bit value from the endpoint's random store, advancing the
 * shared cursor lock-free with a compare-and-set loop and refilling the
 * store when the cursor wraps.  When initial_sequence_debug is non-zero
 * it is used as a deterministic counter instead (debug mode).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before the cursor could run past the usable store. */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* Claim [store_at, store_at+4); retry if another thread raced us. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/*
	 * NOTE(review): assumes random_store is suitably aligned for a
	 * uint32_t read — confirm for strict-alignment platforms.
	 */
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
873 
874 uint32_t
875 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
876 {
877 	uint32_t x, not_done;
878 	struct timeval now;
879 
880 	(void)SCTP_GETTIME_TIMEVAL(&now);
881 	not_done = 1;
882 	while (not_done) {
883 		x = sctp_select_initial_TSN(&inp->sctp_ep);
884 		if (x == 0) {
885 			/* we never use 0 */
886 			continue;
887 		}
888 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
889 			not_done = 0;
890 		}
891 	}
892 	return (x);
893 }
894 
895 int
896 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
897     uint32_t override_tag, uint32_t vrf_id)
898 {
899 	struct sctp_association *asoc;
900 
901 	/*
902 	 * Anything set to zero is taken care of by the allocation routine's
903 	 * bzero
904 	 */
905 
906 	/*
907 	 * Up front select what scoping to apply on addresses I tell my peer
908 	 * Not sure what to do with these right now, we will need to come up
909 	 * with a way to set them. We may need to pass them through from the
910 	 * caller in the sctp_aloc_assoc() function.
911 	 */
912 	int i;
913 
914 	asoc = &stcb->asoc;
915 	/* init all variables to a known value. */
916 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
917 	asoc->max_burst = m->sctp_ep.max_burst;
918 	asoc->fr_max_burst = m->sctp_ep.fr_max_burst;
919 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
920 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
921 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
922 	asoc->ecn_allowed = m->sctp_ecn_enable;
923 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
924 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
925 	asoc->sctp_frag_point = m->sctp_frag_point;
926 #ifdef INET
927 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
928 #else
929 	asoc->default_tos = 0;
930 #endif
931 
932 #ifdef INET6
933 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
934 #else
935 	asoc->default_flowlabel = 0;
936 #endif
937 	asoc->sb_send_resv = 0;
938 	if (override_tag) {
939 		asoc->my_vtag = override_tag;
940 	} else {
941 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
942 	}
943 	/* Get the nonce tags */
944 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
945 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
946 	asoc->vrf_id = vrf_id;
947 
948 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
949 		asoc->hb_is_disabled = 1;
950 	else
951 		asoc->hb_is_disabled = 0;
952 
953 #ifdef SCTP_ASOCLOG_OF_TSNS
954 	asoc->tsn_in_at = 0;
955 	asoc->tsn_out_at = 0;
956 	asoc->tsn_in_wrapped = 0;
957 	asoc->tsn_out_wrapped = 0;
958 	asoc->cumack_log_at = 0;
959 	asoc->cumack_log_atsnt = 0;
960 #endif
961 #ifdef SCTP_FS_SPEC_LOG
962 	asoc->fs_index = 0;
963 #endif
964 	asoc->refcnt = 0;
965 	asoc->assoc_up_sent = 0;
966 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
967 	    sctp_select_initial_TSN(&m->sctp_ep);
968 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
969 	/* we are optimisitic here */
970 	asoc->peer_supports_pktdrop = 1;
971 	asoc->peer_supports_nat = 0;
972 	asoc->sent_queue_retran_cnt = 0;
973 
974 	/* for CMT */
975 	asoc->last_net_cmt_send_started = NULL;
976 
977 	/* This will need to be adjusted */
978 	asoc->last_acked_seq = asoc->init_seq_number - 1;
979 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
980 	asoc->asconf_seq_in = asoc->last_acked_seq;
981 
982 	/* here we are different, we hold the next one we expect */
983 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
984 
985 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
986 	asoc->initial_rto = m->sctp_ep.initial_rto;
987 
988 	asoc->max_init_times = m->sctp_ep.max_init_times;
989 	asoc->max_send_times = m->sctp_ep.max_send_times;
990 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
991 	asoc->free_chunk_cnt = 0;
992 
993 	asoc->iam_blocking = 0;
994 
995 	asoc->context = m->sctp_context;
996 	asoc->def_send = m->def_send;
997 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
998 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
999 	asoc->pr_sctp_cnt = 0;
1000 	asoc->total_output_queue_size = 0;
1001 
1002 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1003 		struct in6pcb *inp6;
1004 
1005 		/* Its a V6 socket */
1006 		inp6 = (struct in6pcb *)m;
1007 		asoc->ipv6_addr_legal = 1;
1008 		/* Now look at the binding flag to see if V4 will be legal */
1009 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1010 			asoc->ipv4_addr_legal = 1;
1011 		} else {
1012 			/* V4 addresses are NOT legal on the association */
1013 			asoc->ipv4_addr_legal = 0;
1014 		}
1015 	} else {
1016 		/* Its a V4 socket, no - V6 */
1017 		asoc->ipv4_addr_legal = 1;
1018 		asoc->ipv6_addr_legal = 0;
1019 	}
1020 
1021 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1022 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1023 
1024 	asoc->smallest_mtu = m->sctp_frag_point;
1025 	asoc->minrto = m->sctp_ep.sctp_minrto;
1026 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1027 
1028 	asoc->locked_on_sending = NULL;
1029 	asoc->stream_locked_on = 0;
1030 	asoc->ecn_echo_cnt_onq = 0;
1031 	asoc->stream_locked = 0;
1032 
1033 	asoc->send_sack = 1;
1034 
1035 	LIST_INIT(&asoc->sctp_restricted_addrs);
1036 
1037 	TAILQ_INIT(&asoc->nets);
1038 	TAILQ_INIT(&asoc->pending_reply_queue);
1039 	TAILQ_INIT(&asoc->asconf_ack_sent);
1040 	/* Setup to fill the hb random cache at first HB */
1041 	asoc->hb_random_idx = 4;
1042 
1043 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1044 
1045 	stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
1046 	stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
1047 
1048 	stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
1049 	stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
1050 
1051 	/*
1052 	 * Now the stream parameters, here we allocate space for all streams
1053 	 * that we request by default.
1054 	 */
1055 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1056 	    m->sctp_ep.pre_open_stream_count;
1057 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1058 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1059 	    SCTP_M_STRMO);
1060 	if (asoc->strmout == NULL) {
1061 		/* big trouble no memory */
1062 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1063 		return (ENOMEM);
1064 	}
1065 	for (i = 0; i < asoc->streamoutcnt; i++) {
1066 		/*
1067 		 * inbound side must be set to 0xffff, also NOTE when we get
1068 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1069 		 * count (streamoutcnt) but first check if we sent to any of
1070 		 * the upper streams that were dropped (if some were). Those
1071 		 * that were dropped must be notified to the upper layer as
1072 		 * failed to send.
1073 		 */
1074 		asoc->strmout[i].next_sequence_sent = 0x0;
1075 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1076 		asoc->strmout[i].stream_no = i;
1077 		asoc->strmout[i].last_msg_incomplete = 0;
1078 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1079 	}
1080 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1081 
1082 	/* Now the mapping array */
1083 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1084 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1085 	    SCTP_M_MAP);
1086 	if (asoc->mapping_array == NULL) {
1087 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1088 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1089 		return (ENOMEM);
1090 	}
1091 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1092 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1093 	    SCTP_M_MAP);
1094 	if (asoc->nr_mapping_array == NULL) {
1095 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1096 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1097 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1098 		return (ENOMEM);
1099 	}
1100 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1101 
1102 	/* Now the init of the other outqueues */
1103 	TAILQ_INIT(&asoc->free_chunks);
1104 	TAILQ_INIT(&asoc->control_send_queue);
1105 	TAILQ_INIT(&asoc->asconf_send_queue);
1106 	TAILQ_INIT(&asoc->send_queue);
1107 	TAILQ_INIT(&asoc->sent_queue);
1108 	TAILQ_INIT(&asoc->reasmqueue);
1109 	TAILQ_INIT(&asoc->resetHead);
1110 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1111 	TAILQ_INIT(&asoc->asconf_queue);
1112 	/* authentication fields */
1113 	asoc->authinfo.random = NULL;
1114 	asoc->authinfo.active_keyid = 0;
1115 	asoc->authinfo.assoc_key = NULL;
1116 	asoc->authinfo.assoc_keyid = 0;
1117 	asoc->authinfo.recv_key = NULL;
1118 	asoc->authinfo.recv_keyid = 0;
1119 	LIST_INIT(&asoc->shared_keys);
1120 	asoc->marked_retrans = 0;
1121 	asoc->timoinit = 0;
1122 	asoc->timodata = 0;
1123 	asoc->timosack = 0;
1124 	asoc->timoshutdown = 0;
1125 	asoc->timoheartbeat = 0;
1126 	asoc->timocookie = 0;
1127 	asoc->timoshutdownack = 0;
1128 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1129 	asoc->discontinuity_time = asoc->start_time;
1130 	/*
1131 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1132 	 * freed later when the association is freed.
1133 	 */
1134 	return (0);
1135 }
1136 
1137 void
1138 sctp_print_mapping_array(struct sctp_association *asoc)
1139 {
1140 	unsigned int i, limit;
1141 
1142 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1143 	    asoc->mapping_array_size,
1144 	    asoc->mapping_array_base_tsn,
1145 	    asoc->cumulative_tsn,
1146 	    asoc->highest_tsn_inside_map,
1147 	    asoc->highest_tsn_inside_nr_map);
1148 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1149 		if (asoc->mapping_array[limit - 1]) {
1150 			break;
1151 		}
1152 	}
1153 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1154 	for (i = 0; i < limit; i++) {
1155 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1156 	}
1157 	if (limit % 16)
1158 		printf("\n");
1159 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1160 		if (asoc->nr_mapping_array[limit - 1]) {
1161 			break;
1162 		}
1163 	}
1164 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1165 	for (i = 0; i < limit; i++) {
1166 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1167 	}
1168 	if (limit % 16)
1169 		printf("\n");
1170 }
1171 
1172 int
1173 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1174 {
1175 	/* mapping array needs to grow */
1176 	uint8_t *new_array1, *new_array2;
1177 	uint32_t new_size;
1178 
1179 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1180 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1181 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1182 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1183 		/* can't get more, forget it */
1184 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1185 		if (new_array1) {
1186 			SCTP_FREE(new_array1, SCTP_M_MAP);
1187 		}
1188 		if (new_array2) {
1189 			SCTP_FREE(new_array2, SCTP_M_MAP);
1190 		}
1191 		return (-1);
1192 	}
1193 	memset(new_array1, 0, new_size);
1194 	memset(new_array2, 0, new_size);
1195 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1196 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1197 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1198 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1199 	asoc->mapping_array = new_array1;
1200 	asoc->nr_mapping_array = new_array2;
1201 	asoc->mapping_array_size = new_size;
1202 	return (0);
1203 }
1204 
1205 
/*
 * Walk SCTP endpoints and their associations on behalf of a queued
 * iterator, invoking the caller-supplied callbacks:
 *   function_inp     - once per matching endpoint; a non-zero return
 *                      skips that endpoint's associations,
 *   function_assoc   - once per association in the desired state,
 *   function_inp_end - after the last association of an endpoint,
 *   function_atend   - once when the whole iteration finishes.
 * After SCTP_ITERATOR_MAX_AT_ONCE associations the global locks are
 * dropped and re-acquired so other threads can make progress; while the
 * locks are released, sctp_it_ctl.iterator_flags may redirect or abort
 * the walk.  Frees "it" on completion.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* drop the reference taken when the iterator was queued */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* it->inp is already read-locked from above */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/* advance past endpoints whose flags/features don't match */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* hold the tcb via refcnt while the locks are down */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				printf("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* re-acquire in the original order, drop the refs */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1357 
/*
 * Drain the queue of pending iterators (sctp_it_ctl.iteratorhead),
 * running each one via sctp_iterator_work().  Called with the iterator
 * work-queue lock held; the lock is dropped around each run and
 * re-taken afterwards.  Exits early when SCTP_ITERATOR_MUST_EXIT is
 * set (e.g. during shutdown/unload).
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		/* run in the iterator's vnet; this call frees "it" */
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);

		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
			sctp_it_ctl.cur_it = NULL;
			break;
		}
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1385 
1386 
1387 static void
1388 sctp_handle_addr_wq(void)
1389 {
1390 	/* deal with the ADDR wq from the rtsock calls */
1391 	struct sctp_laddr *wi, *nwi;
1392 	struct sctp_asconf_iterator *asc;
1393 
1394 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1395 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1396 	if (asc == NULL) {
1397 		/* Try later, no memory */
1398 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1399 		    (struct sctp_inpcb *)NULL,
1400 		    (struct sctp_tcb *)NULL,
1401 		    (struct sctp_nets *)NULL);
1402 		return;
1403 	}
1404 	LIST_INIT(&asc->list_of_work);
1405 	asc->cnt = 0;
1406 
1407 	SCTP_WQ_ADDR_LOCK();
1408 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1409 		LIST_REMOVE(wi, sctp_nxt_addr);
1410 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1411 		asc->cnt++;
1412 	}
1413 	SCTP_WQ_ADDR_UNLOCK();
1414 
1415 	if (asc->cnt == 0) {
1416 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1417 	} else {
1418 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1419 		    sctp_asconf_iterator_stcb,
1420 		    NULL,	/* No ep end for boundall */
1421 		    SCTP_PCB_FLAGS_BOUNDALL,
1422 		    SCTP_PCB_ANY_FEATURES,
1423 		    SCTP_ASOC_ANY_STATE,
1424 		    (void *)asc, 0,
1425 		    sctp_asconf_iterator_end, NULL, 0);
1426 	}
1427 }
1428 
/*
 * NOTE(review): write-only scratch globals with no readers visible in
 * this file; they look like debugging leftovers.  Candidates for removal
 * (or at least "static") — verify no other compilation unit references
 * them before changing linkage.
 */
int retcode = 0;
int cur_oerr = 0;
1431 
1432 void
1433 sctp_timeout_handler(void *t)
1434 {
1435 	struct sctp_inpcb *inp;
1436 	struct sctp_tcb *stcb;
1437 	struct sctp_nets *net;
1438 	struct sctp_timer *tmr;
1439 
1440 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1441 	struct socket *so;
1442 
1443 #endif
1444 	int did_output, type;
1445 
1446 	tmr = (struct sctp_timer *)t;
1447 	inp = (struct sctp_inpcb *)tmr->ep;
1448 	stcb = (struct sctp_tcb *)tmr->tcb;
1449 	net = (struct sctp_nets *)tmr->net;
1450 	CURVNET_SET((struct vnet *)tmr->vnet);
1451 	did_output = 1;
1452 
1453 #ifdef SCTP_AUDITING_ENABLED
1454 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1455 	sctp_auditing(3, inp, stcb, net);
1456 #endif
1457 
1458 	/* sanity checks... */
1459 	if (tmr->self != (void *)tmr) {
1460 		/*
1461 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1462 		 * tmr);
1463 		 */
1464 		CURVNET_RESTORE();
1465 		return;
1466 	}
1467 	tmr->stopped_from = 0xa001;
1468 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1469 		/*
1470 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1471 		 * tmr->type);
1472 		 */
1473 		CURVNET_RESTORE();
1474 		return;
1475 	}
1476 	tmr->stopped_from = 0xa002;
1477 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1478 		CURVNET_RESTORE();
1479 		return;
1480 	}
1481 	/* if this is an iterator timeout, get the struct and clear inp */
1482 	tmr->stopped_from = 0xa003;
1483 	type = tmr->type;
1484 	if (inp) {
1485 		SCTP_INP_INCR_REF(inp);
1486 		if ((inp->sctp_socket == 0) &&
1487 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1488 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1489 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1490 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1491 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1492 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1493 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1494 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1495 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1496 		    ) {
1497 			SCTP_INP_DECR_REF(inp);
1498 			CURVNET_RESTORE();
1499 			return;
1500 		}
1501 	}
1502 	tmr->stopped_from = 0xa004;
1503 	if (stcb) {
1504 		atomic_add_int(&stcb->asoc.refcnt, 1);
1505 		if (stcb->asoc.state == 0) {
1506 			atomic_add_int(&stcb->asoc.refcnt, -1);
1507 			if (inp) {
1508 				SCTP_INP_DECR_REF(inp);
1509 			}
1510 			CURVNET_RESTORE();
1511 			return;
1512 		}
1513 	}
1514 	tmr->stopped_from = 0xa005;
1515 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
1516 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1517 		if (inp) {
1518 			SCTP_INP_DECR_REF(inp);
1519 		}
1520 		if (stcb) {
1521 			atomic_add_int(&stcb->asoc.refcnt, -1);
1522 		}
1523 		CURVNET_RESTORE();
1524 		return;
1525 	}
1526 	tmr->stopped_from = 0xa006;
1527 
1528 	if (stcb) {
1529 		SCTP_TCB_LOCK(stcb);
1530 		atomic_add_int(&stcb->asoc.refcnt, -1);
1531 		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
1532 		    ((stcb->asoc.state == 0) ||
1533 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1534 			SCTP_TCB_UNLOCK(stcb);
1535 			if (inp) {
1536 				SCTP_INP_DECR_REF(inp);
1537 			}
1538 			CURVNET_RESTORE();
1539 			return;
1540 		}
1541 	}
1542 	/* record in stopped what t-o occured */
1543 	tmr->stopped_from = tmr->type;
1544 
1545 	/* mark as being serviced now */
1546 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1547 		/*
1548 		 * Callout has been rescheduled.
1549 		 */
1550 		goto get_out;
1551 	}
1552 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1553 		/*
1554 		 * Not active, so no action.
1555 		 */
1556 		goto get_out;
1557 	}
1558 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1559 
1560 	/* call the handler for the appropriate timer type */
1561 	switch (tmr->type) {
1562 	case SCTP_TIMER_TYPE_ZERO_COPY:
1563 		if (inp == NULL) {
1564 			break;
1565 		}
1566 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1567 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1568 		}
1569 		break;
1570 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1571 		if (inp == NULL) {
1572 			break;
1573 		}
1574 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1575 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1576 		}
1577 		break;
1578 	case SCTP_TIMER_TYPE_ADDR_WQ:
1579 		sctp_handle_addr_wq();
1580 		break;
1581 	case SCTP_TIMER_TYPE_SEND:
1582 		if ((stcb == NULL) || (inp == NULL)) {
1583 			break;
1584 		}
1585 		SCTP_STAT_INCR(sctps_timodata);
1586 		stcb->asoc.timodata++;
1587 		stcb->asoc.num_send_timers_up--;
1588 		if (stcb->asoc.num_send_timers_up < 0) {
1589 			stcb->asoc.num_send_timers_up = 0;
1590 		}
1591 		SCTP_TCB_LOCK_ASSERT(stcb);
1592 		cur_oerr = stcb->asoc.overall_error_count;
1593 		retcode = sctp_t3rxt_timer(inp, stcb, net);
1594 		if (retcode) {
1595 			/* no need to unlock on tcb its gone */
1596 
1597 			goto out_decr;
1598 		}
1599 		SCTP_TCB_LOCK_ASSERT(stcb);
1600 #ifdef SCTP_AUDITING_ENABLED
1601 		sctp_auditing(4, inp, stcb, net);
1602 #endif
1603 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1604 		if ((stcb->asoc.num_send_timers_up == 0) &&
1605 		    (stcb->asoc.sent_queue_cnt > 0)) {
1606 			struct sctp_tmit_chunk *chk;
1607 
1608 			/*
1609 			 * safeguard. If there on some on the sent queue
1610 			 * somewhere but no timers running something is
1611 			 * wrong... so we start a timer on the first chunk
1612 			 * on the send queue on whatever net it is sent to.
1613 			 */
1614 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1615 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1616 			    chk->whoTo);
1617 		}
1618 		break;
1619 	case SCTP_TIMER_TYPE_INIT:
1620 		if ((stcb == NULL) || (inp == NULL)) {
1621 			break;
1622 		}
1623 		SCTP_STAT_INCR(sctps_timoinit);
1624 		stcb->asoc.timoinit++;
1625 		if (sctp_t1init_timer(inp, stcb, net)) {
1626 			/* no need to unlock on tcb its gone */
1627 			goto out_decr;
1628 		}
1629 		/* We do output but not here */
1630 		did_output = 0;
1631 		break;
1632 	case SCTP_TIMER_TYPE_RECV:
1633 		if ((stcb == NULL) || (inp == NULL)) {
1634 			break;
1635 		} {
1636 			SCTP_STAT_INCR(sctps_timosack);
1637 			stcb->asoc.timosack++;
1638 			sctp_send_sack(stcb);
1639 		}
1640 #ifdef SCTP_AUDITING_ENABLED
1641 		sctp_auditing(4, inp, stcb, net);
1642 #endif
1643 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1644 		break;
1645 	case SCTP_TIMER_TYPE_SHUTDOWN:
1646 		if ((stcb == NULL) || (inp == NULL)) {
1647 			break;
1648 		}
1649 		if (sctp_shutdown_timer(inp, stcb, net)) {
1650 			/* no need to unlock on tcb its gone */
1651 			goto out_decr;
1652 		}
1653 		SCTP_STAT_INCR(sctps_timoshutdown);
1654 		stcb->asoc.timoshutdown++;
1655 #ifdef SCTP_AUDITING_ENABLED
1656 		sctp_auditing(4, inp, stcb, net);
1657 #endif
1658 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1659 		break;
1660 	case SCTP_TIMER_TYPE_HEARTBEAT:
1661 		{
1662 			struct sctp_nets *lnet;
1663 			int cnt_of_unconf = 0;
1664 
1665 			if ((stcb == NULL) || (inp == NULL)) {
1666 				break;
1667 			}
1668 			SCTP_STAT_INCR(sctps_timoheartbeat);
1669 			stcb->asoc.timoheartbeat++;
1670 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1671 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1672 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1673 					cnt_of_unconf++;
1674 				}
1675 			}
1676 			if (cnt_of_unconf == 0) {
1677 				if (sctp_heartbeat_timer(inp, stcb, lnet,
1678 				    cnt_of_unconf)) {
1679 					/* no need to unlock on tcb its gone */
1680 					goto out_decr;
1681 				}
1682 			}
1683 #ifdef SCTP_AUDITING_ENABLED
1684 			sctp_auditing(4, inp, stcb, lnet);
1685 #endif
1686 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
1687 			    stcb->sctp_ep, stcb, lnet);
1688 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1689 		}
1690 		break;
1691 	case SCTP_TIMER_TYPE_COOKIE:
1692 		if ((stcb == NULL) || (inp == NULL)) {
1693 			break;
1694 		}
1695 		if (sctp_cookie_timer(inp, stcb, net)) {
1696 			/* no need to unlock on tcb its gone */
1697 			goto out_decr;
1698 		}
1699 		SCTP_STAT_INCR(sctps_timocookie);
1700 		stcb->asoc.timocookie++;
1701 #ifdef SCTP_AUDITING_ENABLED
1702 		sctp_auditing(4, inp, stcb, net);
1703 #endif
1704 		/*
1705 		 * We consider T3 and Cookie timer pretty much the same with
1706 		 * respect to where from in chunk_output.
1707 		 */
1708 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1709 		break;
1710 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1711 		{
1712 			struct timeval tv;
1713 			int i, secret;
1714 
1715 			if (inp == NULL) {
1716 				break;
1717 			}
1718 			SCTP_STAT_INCR(sctps_timosecret);
1719 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1720 			SCTP_INP_WLOCK(inp);
1721 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1722 			inp->sctp_ep.last_secret_number =
1723 			    inp->sctp_ep.current_secret_number;
1724 			inp->sctp_ep.current_secret_number++;
1725 			if (inp->sctp_ep.current_secret_number >=
1726 			    SCTP_HOW_MANY_SECRETS) {
1727 				inp->sctp_ep.current_secret_number = 0;
1728 			}
1729 			secret = (int)inp->sctp_ep.current_secret_number;
1730 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1731 				inp->sctp_ep.secret_key[secret][i] =
1732 				    sctp_select_initial_TSN(&inp->sctp_ep);
1733 			}
1734 			SCTP_INP_WUNLOCK(inp);
1735 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1736 		}
1737 		did_output = 0;
1738 		break;
1739 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1740 		if ((stcb == NULL) || (inp == NULL)) {
1741 			break;
1742 		}
1743 		SCTP_STAT_INCR(sctps_timopathmtu);
1744 		sctp_pathmtu_timer(inp, stcb, net);
1745 		did_output = 0;
1746 		break;
1747 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1748 		if ((stcb == NULL) || (inp == NULL)) {
1749 			break;
1750 		}
1751 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1752 			/* no need to unlock on tcb its gone */
1753 			goto out_decr;
1754 		}
1755 		SCTP_STAT_INCR(sctps_timoshutdownack);
1756 		stcb->asoc.timoshutdownack++;
1757 #ifdef SCTP_AUDITING_ENABLED
1758 		sctp_auditing(4, inp, stcb, net);
1759 #endif
1760 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1761 		break;
1762 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1763 		if ((stcb == NULL) || (inp == NULL)) {
1764 			break;
1765 		}
1766 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1767 		sctp_abort_an_association(inp, stcb,
1768 		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
1769 		/* no need to unlock on tcb its gone */
1770 		goto out_decr;
1771 
1772 	case SCTP_TIMER_TYPE_STRRESET:
1773 		if ((stcb == NULL) || (inp == NULL)) {
1774 			break;
1775 		}
1776 		if (sctp_strreset_timer(inp, stcb, net)) {
1777 			/* no need to unlock on tcb its gone */
1778 			goto out_decr;
1779 		}
1780 		SCTP_STAT_INCR(sctps_timostrmrst);
1781 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1782 		break;
1783 	case SCTP_TIMER_TYPE_EARLYFR:
1784 		/* Need to do FR of things for net */
1785 		if ((stcb == NULL) || (inp == NULL)) {
1786 			break;
1787 		}
1788 		SCTP_STAT_INCR(sctps_timoearlyfr);
1789 		sctp_early_fr_timer(inp, stcb, net);
1790 		break;
1791 	case SCTP_TIMER_TYPE_ASCONF:
1792 		if ((stcb == NULL) || (inp == NULL)) {
1793 			break;
1794 		}
1795 		if (sctp_asconf_timer(inp, stcb, net)) {
1796 			/* no need to unlock on tcb its gone */
1797 			goto out_decr;
1798 		}
1799 		SCTP_STAT_INCR(sctps_timoasconf);
1800 #ifdef SCTP_AUDITING_ENABLED
1801 		sctp_auditing(4, inp, stcb, net);
1802 #endif
1803 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1804 		break;
1805 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1806 		if ((stcb == NULL) || (inp == NULL)) {
1807 			break;
1808 		}
1809 		sctp_delete_prim_timer(inp, stcb, net);
1810 		SCTP_STAT_INCR(sctps_timodelprim);
1811 		break;
1812 
1813 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1814 		if ((stcb == NULL) || (inp == NULL)) {
1815 			break;
1816 		}
1817 		SCTP_STAT_INCR(sctps_timoautoclose);
1818 		sctp_autoclose_timer(inp, stcb, net);
1819 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1820 		did_output = 0;
1821 		break;
1822 	case SCTP_TIMER_TYPE_ASOCKILL:
1823 		if ((stcb == NULL) || (inp == NULL)) {
1824 			break;
1825 		}
1826 		SCTP_STAT_INCR(sctps_timoassockill);
1827 		/* Can we free it yet? */
1828 		SCTP_INP_DECR_REF(inp);
1829 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1830 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1831 		so = SCTP_INP_SO(inp);
1832 		atomic_add_int(&stcb->asoc.refcnt, 1);
1833 		SCTP_TCB_UNLOCK(stcb);
1834 		SCTP_SOCKET_LOCK(so, 1);
1835 		SCTP_TCB_LOCK(stcb);
1836 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1837 #endif
1838 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1839 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1840 		SCTP_SOCKET_UNLOCK(so, 1);
1841 #endif
1842 		/*
1843 		 * free asoc, always unlocks (or destroy's) so prevent
1844 		 * duplicate unlock or unlock of a free mtx :-0
1845 		 */
1846 		stcb = NULL;
1847 		goto out_no_decr;
1848 	case SCTP_TIMER_TYPE_INPKILL:
1849 		SCTP_STAT_INCR(sctps_timoinpkill);
1850 		if (inp == NULL) {
1851 			break;
1852 		}
1853 		/*
1854 		 * special case, take away our increment since WE are the
1855 		 * killer
1856 		 */
1857 		SCTP_INP_DECR_REF(inp);
1858 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1859 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1860 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1861 		inp = NULL;
1862 		goto out_no_decr;
1863 	default:
1864 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1865 		    tmr->type);
1866 		break;
1867 	};
1868 #ifdef SCTP_AUDITING_ENABLED
1869 	sctp_audit_log(0xF1, (uint8_t) tmr->type);
1870 	if (inp)
1871 		sctp_auditing(5, inp, stcb, net);
1872 #endif
1873 	if ((did_output) && stcb) {
1874 		/*
1875 		 * Now we need to clean up the control chunk chain if an
1876 		 * ECNE is on it. It must be marked as UNSENT again so next
1877 		 * call will continue to send it until such time that we get
1878 		 * a CWR, to remove it. It is, however, less likely that we
1879 		 * will find a ecn echo on the chain though.
1880 		 */
1881 		sctp_fix_ecn_echo(&stcb->asoc);
1882 	}
1883 get_out:
1884 	if (stcb) {
1885 		SCTP_TCB_UNLOCK(stcb);
1886 	}
1887 out_decr:
1888 	if (inp) {
1889 		SCTP_INP_DECR_REF(inp);
1890 	}
1891 out_no_decr:
1892 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
1893 	    type);
1894 	CURVNET_RESTORE();
1895 }
1896 
1897 void
1898 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1899     struct sctp_nets *net)
1900 {
1901 	int to_ticks;
1902 	struct sctp_timer *tmr;
1903 
1904 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1905 		return;
1906 
1907 	to_ticks = 0;
1908 
1909 	tmr = NULL;
1910 	if (stcb) {
1911 		SCTP_TCB_LOCK_ASSERT(stcb);
1912 	}
1913 	switch (t_type) {
1914 	case SCTP_TIMER_TYPE_ZERO_COPY:
1915 		tmr = &inp->sctp_ep.zero_copy_timer;
1916 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1917 		break;
1918 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1919 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1920 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1921 		break;
1922 	case SCTP_TIMER_TYPE_ADDR_WQ:
1923 		/* Only 1 tick away :-) */
1924 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1925 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1926 		break;
1927 	case SCTP_TIMER_TYPE_SEND:
1928 		/* Here we use the RTO timer */
1929 		{
1930 			int rto_val;
1931 
1932 			if ((stcb == NULL) || (net == NULL)) {
1933 				return;
1934 			}
1935 			tmr = &net->rxt_timer;
1936 			if (net->RTO == 0) {
1937 				rto_val = stcb->asoc.initial_rto;
1938 			} else {
1939 				rto_val = net->RTO;
1940 			}
1941 			to_ticks = MSEC_TO_TICKS(rto_val);
1942 		}
1943 		break;
1944 	case SCTP_TIMER_TYPE_INIT:
1945 		/*
1946 		 * Here we use the INIT timer default usually about 1
1947 		 * minute.
1948 		 */
1949 		if ((stcb == NULL) || (net == NULL)) {
1950 			return;
1951 		}
1952 		tmr = &net->rxt_timer;
1953 		if (net->RTO == 0) {
1954 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1955 		} else {
1956 			to_ticks = MSEC_TO_TICKS(net->RTO);
1957 		}
1958 		break;
1959 	case SCTP_TIMER_TYPE_RECV:
1960 		/*
1961 		 * Here we use the Delayed-Ack timer value from the inp
1962 		 * ususually about 200ms.
1963 		 */
1964 		if (stcb == NULL) {
1965 			return;
1966 		}
1967 		tmr = &stcb->asoc.dack_timer;
1968 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1969 		break;
1970 	case SCTP_TIMER_TYPE_SHUTDOWN:
1971 		/* Here we use the RTO of the destination. */
1972 		if ((stcb == NULL) || (net == NULL)) {
1973 			return;
1974 		}
1975 		if (net->RTO == 0) {
1976 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1977 		} else {
1978 			to_ticks = MSEC_TO_TICKS(net->RTO);
1979 		}
1980 		tmr = &net->rxt_timer;
1981 		break;
1982 	case SCTP_TIMER_TYPE_HEARTBEAT:
1983 		/*
1984 		 * the net is used here so that we can add in the RTO. Even
1985 		 * though we use a different timer. We also add the HB timer
1986 		 * PLUS a random jitter.
1987 		 */
1988 		if ((inp == NULL) || (stcb == NULL)) {
1989 			return;
1990 		} else {
1991 			uint32_t rndval;
1992 			uint8_t this_random;
1993 			int cnt_of_unconf = 0;
1994 			struct sctp_nets *lnet;
1995 
1996 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1997 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1998 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1999 					cnt_of_unconf++;
2000 				}
2001 			}
2002 			if (cnt_of_unconf) {
2003 				net = lnet = NULL;
2004 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
2005 			}
2006 			if (stcb->asoc.hb_random_idx > 3) {
2007 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2008 				memcpy(stcb->asoc.hb_random_values, &rndval,
2009 				    sizeof(stcb->asoc.hb_random_values));
2010 				stcb->asoc.hb_random_idx = 0;
2011 			}
2012 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
2013 			stcb->asoc.hb_random_idx++;
2014 			stcb->asoc.hb_ect_randombit = 0;
2015 			/*
2016 			 * this_random will be 0 - 256 ms RTO is in ms.
2017 			 */
2018 			if ((stcb->asoc.hb_is_disabled) &&
2019 			    (cnt_of_unconf == 0)) {
2020 				return;
2021 			}
2022 			if (net) {
2023 				int delay;
2024 
2025 				delay = stcb->asoc.heart_beat_delay;
2026 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2027 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2028 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2029 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2030 						delay = 0;
2031 					}
2032 				}
2033 				if (net->RTO == 0) {
2034 					/* Never been checked */
2035 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2036 				} else {
2037 					/* set rto_val to the ms */
2038 					to_ticks = delay + net->RTO + this_random;
2039 				}
2040 			} else {
2041 				if (cnt_of_unconf) {
2042 					to_ticks = this_random + stcb->asoc.initial_rto;
2043 				} else {
2044 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2045 				}
2046 			}
2047 			/*
2048 			 * Now we must convert the to_ticks that are now in
2049 			 * ms to ticks.
2050 			 */
2051 			to_ticks = MSEC_TO_TICKS(to_ticks);
2052 			tmr = &stcb->asoc.hb_timer;
2053 		}
2054 		break;
2055 	case SCTP_TIMER_TYPE_COOKIE:
2056 		/*
2057 		 * Here we can use the RTO timer from the network since one
2058 		 * RTT was compelete. If a retran happened then we will be
2059 		 * using the RTO initial value.
2060 		 */
2061 		if ((stcb == NULL) || (net == NULL)) {
2062 			return;
2063 		}
2064 		if (net->RTO == 0) {
2065 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2066 		} else {
2067 			to_ticks = MSEC_TO_TICKS(net->RTO);
2068 		}
2069 		tmr = &net->rxt_timer;
2070 		break;
2071 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2072 		/*
2073 		 * nothing needed but the endpoint here ususually about 60
2074 		 * minutes.
2075 		 */
2076 		if (inp == NULL) {
2077 			return;
2078 		}
2079 		tmr = &inp->sctp_ep.signature_change;
2080 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2081 		break;
2082 	case SCTP_TIMER_TYPE_ASOCKILL:
2083 		if (stcb == NULL) {
2084 			return;
2085 		}
2086 		tmr = &stcb->asoc.strreset_timer;
2087 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2088 		break;
2089 	case SCTP_TIMER_TYPE_INPKILL:
2090 		/*
2091 		 * The inp is setup to die. We re-use the signature_chage
2092 		 * timer since that has stopped and we are in the GONE
2093 		 * state.
2094 		 */
2095 		if (inp == NULL) {
2096 			return;
2097 		}
2098 		tmr = &inp->sctp_ep.signature_change;
2099 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2100 		break;
2101 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2102 		/*
2103 		 * Here we use the value found in the EP for PMTU ususually
2104 		 * about 10 minutes.
2105 		 */
2106 		if ((stcb == NULL) || (inp == NULL)) {
2107 			return;
2108 		}
2109 		if (net == NULL) {
2110 			return;
2111 		}
2112 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2113 		tmr = &net->pmtu_timer;
2114 		break;
2115 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2116 		/* Here we use the RTO of the destination */
2117 		if ((stcb == NULL) || (net == NULL)) {
2118 			return;
2119 		}
2120 		if (net->RTO == 0) {
2121 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2122 		} else {
2123 			to_ticks = MSEC_TO_TICKS(net->RTO);
2124 		}
2125 		tmr = &net->rxt_timer;
2126 		break;
2127 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2128 		/*
2129 		 * Here we use the endpoints shutdown guard timer usually
2130 		 * about 3 minutes.
2131 		 */
2132 		if ((inp == NULL) || (stcb == NULL)) {
2133 			return;
2134 		}
2135 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2136 		tmr = &stcb->asoc.shut_guard_timer;
2137 		break;
2138 	case SCTP_TIMER_TYPE_STRRESET:
2139 		/*
2140 		 * Here the timer comes from the stcb but its value is from
2141 		 * the net's RTO.
2142 		 */
2143 		if ((stcb == NULL) || (net == NULL)) {
2144 			return;
2145 		}
2146 		if (net->RTO == 0) {
2147 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2148 		} else {
2149 			to_ticks = MSEC_TO_TICKS(net->RTO);
2150 		}
2151 		tmr = &stcb->asoc.strreset_timer;
2152 		break;
2153 
2154 	case SCTP_TIMER_TYPE_EARLYFR:
2155 		{
2156 			unsigned int msec;
2157 
2158 			if ((stcb == NULL) || (net == NULL)) {
2159 				return;
2160 			}
2161 			if (net->flight_size > net->cwnd) {
2162 				/* no need to start */
2163 				return;
2164 			}
2165 			SCTP_STAT_INCR(sctps_earlyfrstart);
2166 			if (net->lastsa == 0) {
2167 				/* Hmm no rtt estimate yet? */
2168 				msec = stcb->asoc.initial_rto >> 2;
2169 			} else {
2170 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2171 			}
2172 			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
2173 				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
2174 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2175 					msec = SCTP_MINFR_MSEC_FLOOR;
2176 				}
2177 			}
2178 			to_ticks = MSEC_TO_TICKS(msec);
2179 			tmr = &net->fr_timer;
2180 		}
2181 		break;
2182 	case SCTP_TIMER_TYPE_ASCONF:
2183 		/*
2184 		 * Here the timer comes from the stcb but its value is from
2185 		 * the net's RTO.
2186 		 */
2187 		if ((stcb == NULL) || (net == NULL)) {
2188 			return;
2189 		}
2190 		if (net->RTO == 0) {
2191 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2192 		} else {
2193 			to_ticks = MSEC_TO_TICKS(net->RTO);
2194 		}
2195 		tmr = &stcb->asoc.asconf_timer;
2196 		break;
2197 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2198 		if ((stcb == NULL) || (net != NULL)) {
2199 			return;
2200 		}
2201 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2202 		tmr = &stcb->asoc.delete_prim_timer;
2203 		break;
2204 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2205 		if (stcb == NULL) {
2206 			return;
2207 		}
2208 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2209 			/*
2210 			 * Really an error since stcb is NOT set to
2211 			 * autoclose
2212 			 */
2213 			return;
2214 		}
2215 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2216 		tmr = &stcb->asoc.autoclose_timer;
2217 		break;
2218 	default:
2219 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2220 		    __FUNCTION__, t_type);
2221 		return;
2222 		break;
2223 	};
2224 	if ((to_ticks <= 0) || (tmr == NULL)) {
2225 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2226 		    __FUNCTION__, t_type, to_ticks, tmr);
2227 		return;
2228 	}
2229 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2230 		/*
2231 		 * we do NOT allow you to have it already running. if it is
2232 		 * we leave the current one up unchanged
2233 		 */
2234 		return;
2235 	}
2236 	/* At this point we can proceed */
2237 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2238 		stcb->asoc.num_send_timers_up++;
2239 	}
2240 	tmr->stopped_from = 0;
2241 	tmr->type = t_type;
2242 	tmr->ep = (void *)inp;
2243 	tmr->tcb = (void *)stcb;
2244 	tmr->net = (void *)net;
2245 	tmr->self = (void *)tmr;
2246 	tmr->vnet = (void *)curvnet;
2247 	tmr->ticks = sctp_get_tick_count();
2248 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2249 	return;
2250 }
2251 
2252 void
2253 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2254     struct sctp_nets *net, uint32_t from)
2255 {
2256 	struct sctp_timer *tmr;
2257 
2258 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2259 	    (inp == NULL))
2260 		return;
2261 
2262 	tmr = NULL;
2263 	if (stcb) {
2264 		SCTP_TCB_LOCK_ASSERT(stcb);
2265 	}
2266 	switch (t_type) {
2267 	case SCTP_TIMER_TYPE_ZERO_COPY:
2268 		tmr = &inp->sctp_ep.zero_copy_timer;
2269 		break;
2270 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2271 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2272 		break;
2273 	case SCTP_TIMER_TYPE_ADDR_WQ:
2274 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2275 		break;
2276 	case SCTP_TIMER_TYPE_EARLYFR:
2277 		if ((stcb == NULL) || (net == NULL)) {
2278 			return;
2279 		}
2280 		tmr = &net->fr_timer;
2281 		SCTP_STAT_INCR(sctps_earlyfrstop);
2282 		break;
2283 	case SCTP_TIMER_TYPE_SEND:
2284 		if ((stcb == NULL) || (net == NULL)) {
2285 			return;
2286 		}
2287 		tmr = &net->rxt_timer;
2288 		break;
2289 	case SCTP_TIMER_TYPE_INIT:
2290 		if ((stcb == NULL) || (net == NULL)) {
2291 			return;
2292 		}
2293 		tmr = &net->rxt_timer;
2294 		break;
2295 	case SCTP_TIMER_TYPE_RECV:
2296 		if (stcb == NULL) {
2297 			return;
2298 		}
2299 		tmr = &stcb->asoc.dack_timer;
2300 		break;
2301 	case SCTP_TIMER_TYPE_SHUTDOWN:
2302 		if ((stcb == NULL) || (net == NULL)) {
2303 			return;
2304 		}
2305 		tmr = &net->rxt_timer;
2306 		break;
2307 	case SCTP_TIMER_TYPE_HEARTBEAT:
2308 		if (stcb == NULL) {
2309 			return;
2310 		}
2311 		tmr = &stcb->asoc.hb_timer;
2312 		break;
2313 	case SCTP_TIMER_TYPE_COOKIE:
2314 		if ((stcb == NULL) || (net == NULL)) {
2315 			return;
2316 		}
2317 		tmr = &net->rxt_timer;
2318 		break;
2319 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2320 		/* nothing needed but the endpoint here */
2321 		tmr = &inp->sctp_ep.signature_change;
2322 		/*
2323 		 * We re-use the newcookie timer for the INP kill timer. We
2324 		 * must assure that we do not kill it by accident.
2325 		 */
2326 		break;
2327 	case SCTP_TIMER_TYPE_ASOCKILL:
2328 		/*
2329 		 * Stop the asoc kill timer.
2330 		 */
2331 		if (stcb == NULL) {
2332 			return;
2333 		}
2334 		tmr = &stcb->asoc.strreset_timer;
2335 		break;
2336 
2337 	case SCTP_TIMER_TYPE_INPKILL:
2338 		/*
2339 		 * The inp is setup to die. We re-use the signature_chage
2340 		 * timer since that has stopped and we are in the GONE
2341 		 * state.
2342 		 */
2343 		tmr = &inp->sctp_ep.signature_change;
2344 		break;
2345 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2346 		if ((stcb == NULL) || (net == NULL)) {
2347 			return;
2348 		}
2349 		tmr = &net->pmtu_timer;
2350 		break;
2351 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2352 		if ((stcb == NULL) || (net == NULL)) {
2353 			return;
2354 		}
2355 		tmr = &net->rxt_timer;
2356 		break;
2357 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2358 		if (stcb == NULL) {
2359 			return;
2360 		}
2361 		tmr = &stcb->asoc.shut_guard_timer;
2362 		break;
2363 	case SCTP_TIMER_TYPE_STRRESET:
2364 		if (stcb == NULL) {
2365 			return;
2366 		}
2367 		tmr = &stcb->asoc.strreset_timer;
2368 		break;
2369 	case SCTP_TIMER_TYPE_ASCONF:
2370 		if (stcb == NULL) {
2371 			return;
2372 		}
2373 		tmr = &stcb->asoc.asconf_timer;
2374 		break;
2375 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2376 		if (stcb == NULL) {
2377 			return;
2378 		}
2379 		tmr = &stcb->asoc.delete_prim_timer;
2380 		break;
2381 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2382 		if (stcb == NULL) {
2383 			return;
2384 		}
2385 		tmr = &stcb->asoc.autoclose_timer;
2386 		break;
2387 	default:
2388 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2389 		    __FUNCTION__, t_type);
2390 		break;
2391 	};
2392 	if (tmr == NULL) {
2393 		return;
2394 	}
2395 	if ((tmr->type != t_type) && tmr->type) {
2396 		/*
2397 		 * Ok we have a timer that is under joint use. Cookie timer
2398 		 * per chance with the SEND timer. We therefore are NOT
2399 		 * running the timer that the caller wants stopped.  So just
2400 		 * return.
2401 		 */
2402 		return;
2403 	}
2404 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2405 		stcb->asoc.num_send_timers_up--;
2406 		if (stcb->asoc.num_send_timers_up < 0) {
2407 			stcb->asoc.num_send_timers_up = 0;
2408 		}
2409 	}
2410 	tmr->self = NULL;
2411 	tmr->stopped_from = from;
2412 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2413 	return;
2414 }
2415 
2416 uint32_t
2417 sctp_calculate_len(struct mbuf *m)
2418 {
2419 	uint32_t tlen = 0;
2420 	struct mbuf *at;
2421 
2422 	at = m;
2423 	while (at) {
2424 		tlen += SCTP_BUF_LEN(at);
2425 		at = SCTP_BUF_NEXT(at);
2426 	}
2427 	return (tlen);
2428 }
2429 
2430 void
2431 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2432     struct sctp_association *asoc, uint32_t mtu)
2433 {
2434 	/*
2435 	 * Reset the P-MTU size on this association, this involves changing
2436 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2437 	 * allow the DF flag to be cleared.
2438 	 */
2439 	struct sctp_tmit_chunk *chk;
2440 	unsigned int eff_mtu, ovh;
2441 
2442 	asoc->smallest_mtu = mtu;
2443 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2444 		ovh = SCTP_MIN_OVERHEAD;
2445 	} else {
2446 		ovh = SCTP_MIN_V4_OVERHEAD;
2447 	}
2448 	eff_mtu = mtu - ovh;
2449 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2450 		if (chk->send_size > eff_mtu) {
2451 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2452 		}
2453 	}
2454 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2455 		if (chk->send_size > eff_mtu) {
2456 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2457 		}
2458 	}
2459 }
2460 
2461 
2462 /*
2463  * given an association and starting time of the current RTT period return
2464  * RTO in number of msecs net should point to the current network
2465  */
2466 
2467 uint32_t
2468 sctp_calculate_rto(struct sctp_tcb *stcb,
2469     struct sctp_association *asoc,
2470     struct sctp_nets *net,
2471     struct timeval *told,
2472     int safe, int local_lan_determine)
2473 {
2474 	/*-
2475 	 * given an association and the starting time of the current RTT
2476 	 * period (in value1/value2) return RTO in number of msecs.
2477 	 */
2478 	int32_t rtt;		/* RTT in ms */
2479 	uint32_t new_rto;
2480 	int first_measure = 0;
2481 	struct timeval now, then, *old;
2482 
2483 	/* Copy it out for sparc64 */
2484 	if (safe == sctp_align_unsafe_makecopy) {
2485 		old = &then;
2486 		memcpy(&then, told, sizeof(struct timeval));
2487 	} else if (safe == sctp_align_safe_nocopy) {
2488 		old = told;
2489 	} else {
2490 		/* error */
2491 		SCTP_PRINTF("Huh, bad rto calc call\n");
2492 		return (0);
2493 	}
2494 	/************************/
2495 	/* 1. calculate new RTT */
2496 	/************************/
2497 	/* get the current time */
2498 	if (stcb->asoc.use_precise_time) {
2499 		(void)SCTP_GETPTIME_TIMEVAL(&now);
2500 	} else {
2501 		(void)SCTP_GETTIME_TIMEVAL(&now);
2502 	}
2503 	timevalsub(&now, old);
2504 	/* store the current RTT in us */
2505 	net->rtt = (uint64_t) 10000000 *(uint64_t) now.tv_sec +
2506 	         (uint64_t) now.tv_usec;
2507 
2508 	/* computer rtt in ms */
2509 	rtt = net->rtt / 1000;
2510 
2511 	/* Do we need to determine the lan type? */
2512 	if ((local_lan_determine == SCTP_DETERMINE_LL_OK) &&
2513 	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
2514 		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
2515 			net->lan_type = SCTP_LAN_INTERNET;
2516 		} else {
2517 			net->lan_type = SCTP_LAN_LOCAL;
2518 		}
2519 	}
2520 	/***************************/
2521 	/* 2. update RTTVAR & SRTT */
2522 	/***************************/
2523 	/*-
2524 	 * Compute the scaled average lastsa and the
2525 	 * scaled variance lastsv as described in van Jacobson
2526 	 * Paper "Congestion Avoidance and Control", Annex A.
2527 	 *
2528 	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
2529 	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
2530 	 */
2531 	if (net->RTO_measured) {
2532 		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
2533 		net->lastsa += rtt;
2534 		if (rtt < 0) {
2535 			rtt = -rtt;
2536 		}
2537 		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
2538 		net->lastsv += rtt;
2539 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2540 			rto_logging(net, SCTP_LOG_RTTVAR);
2541 		}
2542 	} else {
2543 		/* First RTO measurment */
2544 		net->RTO_measured = 1;
2545 		first_measure = 1;
2546 		net->lastsa = rtt << SCTP_RTT_SHIFT;
2547 		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
2548 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
2549 			rto_logging(net, SCTP_LOG_INITIAL_RTT);
2550 		}
2551 	}
2552 	if (net->lastsv == 0) {
2553 		net->lastsv = SCTP_CLOCK_GRANULARITY;
2554 	}
2555 	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
2556 	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
2557 	    (stcb->asoc.sat_network_lockout == 0)) {
2558 		stcb->asoc.sat_network = 1;
2559 	} else if ((!first_measure) && stcb->asoc.sat_network) {
2560 		stcb->asoc.sat_network = 0;
2561 		stcb->asoc.sat_network_lockout = 1;
2562 	}
2563 	/* bound it, per C6/C7 in Section 5.3.1 */
2564 	if (new_rto < stcb->asoc.minrto) {
2565 		new_rto = stcb->asoc.minrto;
2566 	}
2567 	if (new_rto > stcb->asoc.maxrto) {
2568 		new_rto = stcb->asoc.maxrto;
2569 	}
2570 	/* we are now returning the RTO */
2571 	return (new_rto);
2572 }
2573 
2574 /*
2575  * return a pointer to a contiguous piece of data from the given mbuf chain
2576  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2577  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2578  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2579  */
2580 caddr_t
2581 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2582 {
2583 	uint32_t count;
2584 	uint8_t *ptr;
2585 
2586 	ptr = in_ptr;
2587 	if ((off < 0) || (len <= 0))
2588 		return (NULL);
2589 
2590 	/* find the desired start location */
2591 	while ((m != NULL) && (off > 0)) {
2592 		if (off < SCTP_BUF_LEN(m))
2593 			break;
2594 		off -= SCTP_BUF_LEN(m);
2595 		m = SCTP_BUF_NEXT(m);
2596 	}
2597 	if (m == NULL)
2598 		return (NULL);
2599 
2600 	/* is the current mbuf large enough (eg. contiguous)? */
2601 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2602 		return (mtod(m, caddr_t)+off);
2603 	} else {
2604 		/* else, it spans more than one mbuf, so save a temp copy... */
2605 		while ((m != NULL) && (len > 0)) {
2606 			count = min(SCTP_BUF_LEN(m) - off, len);
2607 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2608 			len -= count;
2609 			ptr += count;
2610 			off = 0;
2611 			m = SCTP_BUF_NEXT(m);
2612 		}
2613 		if ((m == NULL) && (len > 0))
2614 			return (NULL);
2615 		else
2616 			return ((caddr_t)in_ptr);
2617 	}
2618 }
2619 
2620 
2621 
struct sctp_paramhdr *
sctp_get_next_param(struct mbuf *m, int offset,
    struct sctp_paramhdr *pull, int pull_limit)
{
	/*
	 * Typed front end to sctp_m_getptr(): fetch 'pull_limit' bytes at
	 * 'offset' (copying into 'pull' if they span mbufs) and view them
	 * as an SCTP parameter header.
	 */
	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
	    (uint8_t *) pull));
}
2632 
2633 
2634 int
2635 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2636 {
2637 	/*
2638 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2639 	 * padlen is > 3 this routine will fail.
2640 	 */
2641 	uint8_t *dp;
2642 	int i;
2643 
2644 	if (padlen > 3) {
2645 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2646 		return (ENOBUFS);
2647 	}
2648 	if (padlen <= M_TRAILINGSPACE(m)) {
2649 		/*
2650 		 * The easy way. We hope the majority of the time we hit
2651 		 * here :)
2652 		 */
2653 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2654 		SCTP_BUF_LEN(m) += padlen;
2655 	} else {
2656 		/* Hard way we must grow the mbuf */
2657 		struct mbuf *tmp;
2658 
2659 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2660 		if (tmp == NULL) {
2661 			/* Out of space GAK! we are in big trouble. */
2662 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2663 			return (ENOSPC);
2664 		}
2665 		/* setup and insert in middle */
2666 		SCTP_BUF_LEN(tmp) = padlen;
2667 		SCTP_BUF_NEXT(tmp) = NULL;
2668 		SCTP_BUF_NEXT(m) = tmp;
2669 		dp = mtod(tmp, uint8_t *);
2670 	}
2671 	/* zero out the pad */
2672 	for (i = 0; i < padlen; i++) {
2673 		*dp = 0;
2674 		dp++;
2675 	}
2676 	return (0);
2677 }
2678 
2679 int
2680 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2681 {
2682 	/* find the last mbuf in chain and pad it */
2683 	struct mbuf *m_at;
2684 
2685 	m_at = m;
2686 	if (last_mbuf) {
2687 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2688 	} else {
2689 		while (m_at) {
2690 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2691 				return (sctp_add_pad_tombuf(m_at, padval));
2692 			}
2693 			m_at = SCTP_BUF_NEXT(m_at);
2694 		}
2695 	}
2696 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2697 	return (EFAULT);
2698 }
2699 
/*
 * Build and queue an SCTP_ASSOC_CHANGE notification (state 'event',
 * cause 'error') onto the association's socket read queue.  For the
 * TCP model and connected one-to-many sockets, a COMM_LOST or
 * CANT_STR_ASSOC event additionally sets so_error and wakes any blocked
 * readers/writers so they observe the failure.  'data' is unused here.
 * 'so_locked' tells whether the caller already holds the socket lock
 * (only consulted on platforms that take it in this function).
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			/* association never got fully up: refused */
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			/* established association torn down: reset */
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock-order dance: hold a refcount, drop the TCB
			 * lock, take the socket lock, then re-take the TCB
			 * lock.  The socket may have closed while we were
			 * unlocked - bail out if so.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		socantrcvmore(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Fill in the sctp_assoc_change record for the user. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Same lock-order dance as above. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2817 
2818 static void
2819 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2820     struct sockaddr *sa, uint32_t error)
2821 {
2822 	struct mbuf *m_notify;
2823 	struct sctp_paddr_change *spc;
2824 	struct sctp_queued_to_read *control;
2825 
2826 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2827 		/* event not enabled */
2828 		return;
2829 	}
2830 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2831 	if (m_notify == NULL)
2832 		return;
2833 	SCTP_BUF_LEN(m_notify) = 0;
2834 	spc = mtod(m_notify, struct sctp_paddr_change *);
2835 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2836 	spc->spc_flags = 0;
2837 	spc->spc_length = sizeof(struct sctp_paddr_change);
2838 	switch (sa->sa_family) {
2839 	case AF_INET:
2840 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2841 		break;
2842 #ifdef INET6
2843 	case AF_INET6:
2844 		{
2845 			struct sockaddr_in6 *sin6;
2846 
2847 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2848 
2849 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2850 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2851 				if (sin6->sin6_scope_id == 0) {
2852 					/* recover scope_id for user */
2853 					(void)sa6_recoverscope(sin6);
2854 				} else {
2855 					/* clear embedded scope_id for user */
2856 					in6_clearscope(&sin6->sin6_addr);
2857 				}
2858 			}
2859 			break;
2860 		}
2861 #endif
2862 	default:
2863 		/* TSNH */
2864 		break;
2865 	}
2866 	spc->spc_state = state;
2867 	spc->spc_error = error;
2868 	spc->spc_assoc_id = sctp_get_associd(stcb);
2869 
2870 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2871 	SCTP_BUF_NEXT(m_notify) = NULL;
2872 
2873 	/* append to socket */
2874 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2875 	    0, 0, 0, 0, 0, 0,
2876 	    m_notify);
2877 	if (control == NULL) {
2878 		/* no memory */
2879 		sctp_m_freem(m_notify);
2880 		return;
2881 	}
2882 	control->length = SCTP_BUF_LEN(m_notify);
2883 	control->spec_flags = M_NOTIFICATION;
2884 	/* not that we need this */
2885 	control->tail_mbuf = m_notify;
2886 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2887 	    control,
2888 	    &stcb->sctp_socket->so_rcv, 1,
2889 	    SCTP_READ_LOCK_NOT_HELD,
2890 	    SCTP_SO_NOT_LOCKED);
2891 }
2892 
2893 
/*
 * Queue an SCTP_SEND_FAILED notification for the transmit chunk 'chk'.
 * The chunk's data mbufs are stolen (chk->data is NULLed) and chained
 * onto the notification so the user gets the failed payload back.
 * 'error' distinguishes unsent vs. sent-but-failed data.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* length reported to the user: header + payload sans chunk header */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no room: drop the notification (and the stolen data) */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2975 
2976 
/*
 * Queue an SCTP_SEND_FAILED notification for a message still on the
 * stream queue ('sp', never sent as a chunk).  The pending data mbufs
 * are stolen (sp->data is NULLed) and chained onto the notification so
 * the user gets the payload back.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* length reported to the user: header + pending payload */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* part of the message was already sent in fragments */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no room: drop the notification (and the stolen data) */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3049 
3050 
3051 
3052 static void
3053 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3054     uint32_t error)
3055 {
3056 	struct mbuf *m_notify;
3057 	struct sctp_adaptation_event *sai;
3058 	struct sctp_queued_to_read *control;
3059 
3060 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3061 		/* event not enabled */
3062 		return;
3063 	}
3064 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3065 	if (m_notify == NULL)
3066 		/* no space left */
3067 		return;
3068 	SCTP_BUF_LEN(m_notify) = 0;
3069 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3070 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3071 	sai->sai_flags = 0;
3072 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3073 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3074 	sai->sai_assoc_id = sctp_get_associd(stcb);
3075 
3076 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3077 	SCTP_BUF_NEXT(m_notify) = NULL;
3078 
3079 	/* append to socket */
3080 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3081 	    0, 0, 0, 0, 0, 0,
3082 	    m_notify);
3083 	if (control == NULL) {
3084 		/* no memory */
3085 		sctp_m_freem(m_notify);
3086 		return;
3087 	}
3088 	control->length = SCTP_BUF_LEN(m_notify);
3089 	control->spec_flags = M_NOTIFICATION;
3090 	/* not that we need this */
3091 	control->tail_mbuf = m_notify;
3092 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3093 	    control,
3094 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3095 }
3096 
3097 /* This always must be called with the read-queue LOCKED in the INP */
3098 static void
3099 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3100     uint32_t val, int so_locked
3101 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3102     SCTP_UNUSED
3103 #endif
3104 )
3105 {
3106 	struct mbuf *m_notify;
3107 	struct sctp_pdapi_event *pdapi;
3108 	struct sctp_queued_to_read *control;
3109 	struct sockbuf *sb;
3110 
3111 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3112 		/* event not enabled */
3113 		return;
3114 	}
3115 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3116 		return;
3117 	}
3118 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3119 	if (m_notify == NULL)
3120 		/* no space left */
3121 		return;
3122 	SCTP_BUF_LEN(m_notify) = 0;
3123 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3124 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3125 	pdapi->pdapi_flags = 0;
3126 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3127 	pdapi->pdapi_indication = error;
3128 	pdapi->pdapi_stream = (val >> 16);
3129 	pdapi->pdapi_seq = (val & 0x0000ffff);
3130 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3131 
3132 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3133 	SCTP_BUF_NEXT(m_notify) = NULL;
3134 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3135 	    0, 0, 0, 0, 0, 0,
3136 	    m_notify);
3137 	if (control == NULL) {
3138 		/* no memory */
3139 		sctp_m_freem(m_notify);
3140 		return;
3141 	}
3142 	control->spec_flags = M_NOTIFICATION;
3143 	control->length = SCTP_BUF_LEN(m_notify);
3144 	/* not that we need this */
3145 	control->tail_mbuf = m_notify;
3146 	control->held_length = 0;
3147 	control->length = 0;
3148 	sb = &stcb->sctp_socket->so_rcv;
3149 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3150 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3151 	}
3152 	sctp_sballoc(stcb, sb, m_notify);
3153 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3154 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3155 	}
3156 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3157 	control->end_added = 1;
3158 	if (stcb->asoc.control_pdapi)
3159 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3160 	else {
3161 		/* we really should not see this case */
3162 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3163 	}
3164 	if (stcb->sctp_ep && stcb->sctp_socket) {
3165 		/* This should always be the case */
3166 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3167 		struct socket *so;
3168 
3169 		so = SCTP_INP_SO(stcb->sctp_ep);
3170 		if (!so_locked) {
3171 			atomic_add_int(&stcb->asoc.refcnt, 1);
3172 			SCTP_TCB_UNLOCK(stcb);
3173 			SCTP_SOCKET_LOCK(so, 1);
3174 			SCTP_TCB_LOCK(stcb);
3175 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3176 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3177 				SCTP_SOCKET_UNLOCK(so, 1);
3178 				return;
3179 			}
3180 		}
3181 #endif
3182 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3183 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3184 		if (!so_locked) {
3185 			SCTP_SOCKET_UNLOCK(so, 1);
3186 		}
3187 #endif
3188 	}
3189 }
3190 
/*
 * Queue an SCTP_SHUTDOWN_EVENT notification.  For 1-to-1 style (TCP
 * model) sockets this also marks the socket as unable to send and wakes
 * any writer before the notification is queued.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/* socket lock must be taken before the TCB lock is re-taken */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was torn down while we dropped the TCB lock */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3258 
3259 static void
3260 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3261     int so_locked
3262 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3263     SCTP_UNUSED
3264 #endif
3265 )
3266 {
3267 	struct mbuf *m_notify;
3268 	struct sctp_sender_dry_event *event;
3269 	struct sctp_queued_to_read *control;
3270 
3271 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3272 		/* event not enabled */
3273 		return;
3274 	}
3275 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3276 	if (m_notify == NULL) {
3277 		/* no space left */
3278 		return;
3279 	}
3280 	SCTP_BUF_LEN(m_notify) = 0;
3281 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3282 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3283 	event->sender_dry_flags = 0;
3284 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3285 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3286 
3287 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3288 	SCTP_BUF_NEXT(m_notify) = NULL;
3289 
3290 	/* append to socket */
3291 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3292 	    0, 0, 0, 0, 0, 0, m_notify);
3293 	if (control == NULL) {
3294 		/* no memory */
3295 		sctp_m_freem(m_notify);
3296 		return;
3297 	}
3298 	control->length = SCTP_BUF_LEN(m_notify);
3299 	control->spec_flags = M_NOTIFICATION;
3300 	/* not that we need this */
3301 	control->tail_mbuf = m_notify;
3302 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3303 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3304 }
3305 
3306 
3307 static void
3308 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3309 {
3310 	struct mbuf *m_notify;
3311 	struct sctp_queued_to_read *control;
3312 	struct sctp_stream_reset_event *strreset;
3313 	int len;
3314 
3315 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3316 		/* event not enabled */
3317 		return;
3318 	}
3319 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3320 	if (m_notify == NULL)
3321 		/* no space left */
3322 		return;
3323 	SCTP_BUF_LEN(m_notify) = 0;
3324 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3325 	if (len > M_TRAILINGSPACE(m_notify)) {
3326 		/* never enough room */
3327 		sctp_m_freem(m_notify);
3328 		return;
3329 	}
3330 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3331 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3332 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3333 	strreset->strreset_length = len;
3334 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3335 	strreset->strreset_list[0] = number_entries;
3336 
3337 	SCTP_BUF_LEN(m_notify) = len;
3338 	SCTP_BUF_NEXT(m_notify) = NULL;
3339 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3340 		/* no space */
3341 		sctp_m_freem(m_notify);
3342 		return;
3343 	}
3344 	/* append to socket */
3345 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3346 	    0, 0, 0, 0, 0, 0,
3347 	    m_notify);
3348 	if (control == NULL) {
3349 		/* no memory */
3350 		sctp_m_freem(m_notify);
3351 		return;
3352 	}
3353 	control->spec_flags = M_NOTIFICATION;
3354 	control->length = SCTP_BUF_LEN(m_notify);
3355 	/* not that we need this */
3356 	control->tail_mbuf = m_notify;
3357 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3358 	    control,
3359 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3360 }
3361 
3362 
3363 static void
3364 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3365     int number_entries, uint16_t * list, int flag)
3366 {
3367 	struct mbuf *m_notify;
3368 	struct sctp_queued_to_read *control;
3369 	struct sctp_stream_reset_event *strreset;
3370 	int len;
3371 
3372 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3373 		/* event not enabled */
3374 		return;
3375 	}
3376 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3377 	if (m_notify == NULL)
3378 		/* no space left */
3379 		return;
3380 	SCTP_BUF_LEN(m_notify) = 0;
3381 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3382 	if (len > M_TRAILINGSPACE(m_notify)) {
3383 		/* never enough room */
3384 		sctp_m_freem(m_notify);
3385 		return;
3386 	}
3387 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3388 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3389 	if (number_entries == 0) {
3390 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3391 	} else {
3392 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3393 	}
3394 	strreset->strreset_length = len;
3395 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3396 	if (number_entries) {
3397 		int i;
3398 
3399 		for (i = 0; i < number_entries; i++) {
3400 			strreset->strreset_list[i] = ntohs(list[i]);
3401 		}
3402 	}
3403 	SCTP_BUF_LEN(m_notify) = len;
3404 	SCTP_BUF_NEXT(m_notify) = NULL;
3405 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3406 		/* no space */
3407 		sctp_m_freem(m_notify);
3408 		return;
3409 	}
3410 	/* append to socket */
3411 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3412 	    0, 0, 0, 0, 0, 0,
3413 	    m_notify);
3414 	if (control == NULL) {
3415 		/* no memory */
3416 		sctp_m_freem(m_notify);
3417 		return;
3418 	}
3419 	control->spec_flags = M_NOTIFICATION;
3420 	control->length = SCTP_BUF_LEN(m_notify);
3421 	/* not that we need this */
3422 	control->tail_mbuf = m_notify;
3423 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3424 	    control,
3425 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3426 }
3427 
3428 
/*
 * Central dispatcher for upper-layer (ULP) notifications.  Maps the
 * internal 'notification' code onto the specific notification builder,
 * after filtering out notifications that must not be delivered (socket
 * gone/closed, receive side shut down, or interface events while still
 * in the COOKIE_WAIT/COOKIE_ECHOED front states).  'data' is an opaque
 * per-notification payload (a net, a chunk, a pending sp, a stream
 * list, ...) interpreted by each case below.
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		/* receive side already shut down - nowhere to deliver */
		return;
	}
	/* NOTE(review): stcb was already null-checked above; the 'stcb &&' is redundant */
	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is only sent once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb, error);
		}
		if (stcb->asoc.peer_supports_auth == 0) {
			/* recurse to also report the lack of peer AUTH support */
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* data is a stream queue entry that never got chunked */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_DG_FAIL:
		/* data is a transmit chunk that failed */
		sctp_notify_send_failed(stcb, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			/* val packs stream (hi 16 bits) and ssn (lo 16 bits) */
			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_STRDATA_ERR:
		break;
	case SCTP_NOTIFY_ASSOC_ABORTED:
		/* front states report CANT_STR_ASSOC, established ones COMM_LOST */
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_PEER_OPENED_STREAM:
		break;
	case SCTP_NOTIFY_STREAM_OPENED_OK:
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_HB_RESP:
		break;
	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
		/* 'error' carries the number of streams for the _ADD cases */
		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_ADD_OK:
		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
		break;

	case SCTP_NOTIFY_STR_RESET_SEND:
		/* 'error' carries the entry count, 'data' the stream list */
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SUCCESS:
		break;
	case SCTP_NOTIFY_ASCONF_FAILED:
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* the key id travels packed into the data pointer */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}
3605 
/*
 * Drain every outbound queue (sent queue, send queue, and all per-stream
 * output queues) of the association, raising an SCTP_NOTIFY_DG_FAIL /
 * SCTP_NOTIFY_SPECIAL_SP_FAIL notification for each piece of user data
 * dropped.  Used when the association is being aborted/torn down.
 * 'holds_lock' non-zero means the caller already holds the TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* no socket to report to */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* notification may steal chk->data; re-check below */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* sctp_notify_send_failed2() steals sp->data on success */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3699 
3700 void
3701 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3702 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3703     SCTP_UNUSED
3704 #endif
3705 )
3706 {
3707 
3708 	if (stcb == NULL) {
3709 		return;
3710 	}
3711 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3712 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3713 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3714 		return;
3715 	}
3716 	/* Tell them we lost the asoc */
3717 	sctp_report_all_outbound(stcb, 1, so_locked);
3718 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3719 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3720 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3721 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3722 	}
3723 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3724 }
3725 
/*
 * Abort an association in response to an inbound packet: notify the
 * ULP (if we have a TCB), send an ABORT back to the peer built from
 * the offending packet (m/iphlen/sh), and free the TCB.
 * When stcb is NULL only the wire ABORT is sent (vtag 0).
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* socket lock must be taken before re-taking the TCB lock */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* only established assocs count against the gauge */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3769 
3770 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug helper: dump the per-association inbound and outbound TSN
 * tracking logs (circular buffers in_tsnlog/out_tsnlog).  The body was
 * guarded by the misspelled macro "NOSIY_PRINTS" (a typo for
 * NOISY_PRINTS), so it could never be enabled under the intended name;
 * accept both spellings for backward compatibility.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#if defined(NOISY_PRINTS) || defined(NOSIY_PRINTS)
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* when wrapped, the oldest entries run from tsn_in_at to the end */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3831 
3832 #endif
3833 
/*
 * Locally abort an existing association: notify the ULP, send an ABORT
 * chunk (with op_err as its error cause) to the peer, and free the TCB.
 * With stcb == NULL the inp may instead be freed if its socket is gone
 * and no associations remain.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last assoc gone - release the endpoint too */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* only established assocs count against the gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/* socket lock must be taken before re-taking the TCB lock */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3899 
/*
 * Handle an "out of the blue" packet - one for which no association
 * exists.  Walks the chunks to decide whether we may answer at all:
 * some chunk types must never provoke a response (ABORT, SHUTDOWN
 * COMPLETE, packet-dropped, COOKIE ECHO), a SHUTDOWN ACK gets a
 * SHUTDOWN COMPLETE, and everything else results in an ABORT back to
 * the sender (per the OOTB rules of RFC 4960, section 8.4).
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			/* endpoint has no assocs left - release it */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
			return;
		default:
			/* keep scanning the remaining chunks */
			break;
		}
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	/* no suppressing chunk found - answer with an ABORT (vtag 0) */
	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
}
3951 
3952 /*
3953  * check the inbound datagram to make sure there is not an abort inside it,
3954  * if there is return 1, else return 0.
3955  */
3956 int
3957 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3958 {
3959 	struct sctp_chunkhdr *ch;
3960 	struct sctp_init_chunk *init_chk, chunk_buf;
3961 	int offset;
3962 	unsigned int chk_length;
3963 
3964 	offset = iphlen + sizeof(struct sctphdr);
3965 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3966 	    (uint8_t *) & chunk_buf);
3967 	while (ch != NULL) {
3968 		chk_length = ntohs(ch->chunk_length);
3969 		if (chk_length < sizeof(*ch)) {
3970 			/* packet is probably corrupt */
3971 			break;
3972 		}
3973 		/* we seem to be ok, is it an abort? */
3974 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
3975 			/* yep, tell them */
3976 			return (1);
3977 		}
3978 		if (ch->chunk_type == SCTP_INITIATION) {
3979 			/* need to update the Vtag */
3980 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
3981 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
3982 			if (init_chk != NULL) {
3983 				*vtagfill = ntohl(init_chk->init.initiate_tag);
3984 			}
3985 		}
3986 		/* Nope, move to the next chunk */
3987 		offset += SCTP_SIZE32(chk_length);
3988 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3989 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3990 	}
3991 	return (0);
3992 }
3993 
3994 /*
3995  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
3996  * set (i.e. it's 0) so, create this function to compare link local scopes
3997  */
3998 #ifdef INET6
3999 uint32_t
4000 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4001 {
4002 	struct sockaddr_in6 a, b;
4003 
4004 	/* save copies */
4005 	a = *addr1;
4006 	b = *addr2;
4007 
4008 	if (a.sin6_scope_id == 0)
4009 		if (sa6_recoverscope(&a)) {
4010 			/* can't get scope, so can't match */
4011 			return (0);
4012 		}
4013 	if (b.sin6_scope_id == 0)
4014 		if (sa6_recoverscope(&b)) {
4015 			/* can't get scope, so can't match */
4016 			return (0);
4017 		}
4018 	if (a.sin6_scope_id != b.sin6_scope_id)
4019 		return (0);
4020 
4021 	return (1);
4022 }
4023 
4024 /*
4025  * returns a sockaddr_in6 with embedded scope recovered and removed
4026  */
4027 struct sockaddr_in6 *
4028 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4029 {
4030 	/* check and strip embedded scope junk */
4031 	if (addr->sin6_family == AF_INET6) {
4032 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4033 			if (addr->sin6_scope_id == 0) {
4034 				*store = *addr;
4035 				if (!sa6_recoverscope(store)) {
4036 					/* use the recovered scope */
4037 					addr = store;
4038 				}
4039 			} else {
4040 				/* else, return the original "to" addr */
4041 				in6_clearscope(&addr->sin6_addr);
4042 			}
4043 		}
4044 	}
4045 	return (addr);
4046 }
4047 
4048 #endif
4049 
4050 /*
4051  * are the two addresses the same?  currently a "scopeless" check returns: 1
4052  * if same, 0 if not
4053  */
4054 int
4055 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4056 {
4057 
4058 	/* must be valid */
4059 	if (sa1 == NULL || sa2 == NULL)
4060 		return (0);
4061 
4062 	/* must be the same family */
4063 	if (sa1->sa_family != sa2->sa_family)
4064 		return (0);
4065 
4066 	switch (sa1->sa_family) {
4067 #ifdef INET6
4068 	case AF_INET6:
4069 		{
4070 			/* IPv6 addresses */
4071 			struct sockaddr_in6 *sin6_1, *sin6_2;
4072 
4073 			sin6_1 = (struct sockaddr_in6 *)sa1;
4074 			sin6_2 = (struct sockaddr_in6 *)sa2;
4075 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4076 			    sin6_2));
4077 		}
4078 #endif
4079 	case AF_INET:
4080 		{
4081 			/* IPv4 addresses */
4082 			struct sockaddr_in *sin_1, *sin_2;
4083 
4084 			sin_1 = (struct sockaddr_in *)sa1;
4085 			sin_2 = (struct sockaddr_in *)sa2;
4086 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4087 		}
4088 	default:
4089 		/* we don't do these... */
4090 		return (0);
4091 	}
4092 }
4093 
4094 void
4095 sctp_print_address(struct sockaddr *sa)
4096 {
4097 #ifdef INET6
4098 	char ip6buf[INET6_ADDRSTRLEN];
4099 
4100 	ip6buf[0] = 0;
4101 #endif
4102 
4103 	switch (sa->sa_family) {
4104 #ifdef INET6
4105 	case AF_INET6:
4106 		{
4107 			struct sockaddr_in6 *sin6;
4108 
4109 			sin6 = (struct sockaddr_in6 *)sa;
4110 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4111 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4112 			    ntohs(sin6->sin6_port),
4113 			    sin6->sin6_scope_id);
4114 			break;
4115 		}
4116 #endif
4117 	case AF_INET:
4118 		{
4119 			struct sockaddr_in *sin;
4120 			unsigned char *p;
4121 
4122 			sin = (struct sockaddr_in *)sa;
4123 			p = (unsigned char *)&sin->sin_addr;
4124 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4125 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4126 			break;
4127 		}
4128 	default:
4129 		SCTP_PRINTF("?\n");
4130 		break;
4131 	}
4132 }
4133 
4134 void
4135 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4136 {
4137 	switch (iph->ip_v) {
4138 	case IPVERSION:
4139 		{
4140 			struct sockaddr_in lsa, fsa;
4141 
4142 			bzero(&lsa, sizeof(lsa));
4143 			lsa.sin_len = sizeof(lsa);
4144 			lsa.sin_family = AF_INET;
4145 			lsa.sin_addr = iph->ip_src;
4146 			lsa.sin_port = sh->src_port;
4147 			bzero(&fsa, sizeof(fsa));
4148 			fsa.sin_len = sizeof(fsa);
4149 			fsa.sin_family = AF_INET;
4150 			fsa.sin_addr = iph->ip_dst;
4151 			fsa.sin_port = sh->dest_port;
4152 			SCTP_PRINTF("src: ");
4153 			sctp_print_address((struct sockaddr *)&lsa);
4154 			SCTP_PRINTF("dest: ");
4155 			sctp_print_address((struct sockaddr *)&fsa);
4156 			break;
4157 		}
4158 #ifdef INET6
4159 	case IPV6_VERSION >> 4:
4160 		{
4161 			struct ip6_hdr *ip6;
4162 			struct sockaddr_in6 lsa6, fsa6;
4163 
4164 			ip6 = (struct ip6_hdr *)iph;
4165 			bzero(&lsa6, sizeof(lsa6));
4166 			lsa6.sin6_len = sizeof(lsa6);
4167 			lsa6.sin6_family = AF_INET6;
4168 			lsa6.sin6_addr = ip6->ip6_src;
4169 			lsa6.sin6_port = sh->src_port;
4170 			bzero(&fsa6, sizeof(fsa6));
4171 			fsa6.sin6_len = sizeof(fsa6);
4172 			fsa6.sin6_family = AF_INET6;
4173 			fsa6.sin6_addr = ip6->ip6_dst;
4174 			fsa6.sin6_port = sh->dest_port;
4175 			SCTP_PRINTF("src: ");
4176 			sctp_print_address((struct sockaddr *)&lsa6);
4177 			SCTP_PRINTF("dest: ");
4178 			sctp_print_address((struct sockaddr *)&fsa6);
4179 			break;
4180 		}
4181 #endif
4182 	default:
4183 		/* TSNH */
4184 		break;
4185 	}
4186 }
4187 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp, adjusting the
	 * socket-receive-buffer accounting on both sockets as we go.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* sblock() serializes us against concurrent readers of old_so */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit every mbuf from the old receive buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit every mbuf to the new receive buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4263 
/*
 * Queue 'control' (a fully built read-queue entry) at the tail of the
 * endpoint's read queue and book its mbuf chain into the socket receive
 * buffer 'sb'.  Zero-length mbufs are pruned from the chain; if the whole
 * chain collapses to nothing the control itself is freed instead of being
 * queued.  'end' marks the message as complete; the socket is finally
 * woken (or a zero-copy event posted) so readers see the data.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* no reader will ever come: toss both data and control */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications do not count as user messages received */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* charge this mbuf to the socket receive buffer */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		/* the sender has completed this message */
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Ref the assoc so it cannot go away while
				 * we re-order the locks (socket lock before
				 * TCB lock).
				 */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* socket vanished while unlocked */
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4385 
4386 
/*
 * Append mbuf chain 'm' to an existing read-queue entry 'control' (used
 * for partial-delivery API appends and reassembly).  When 'sb' is
 * non-NULL the appended bytes are also booked into the socket receive
 * buffer.  Returns 0 on success (including the socket-can't-read case)
 * and -1 when there is nothing valid to append to.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* no reader will ever come; silently succeed */
		SCTP_INP_READ_UNLOCK(inp);
		return 0;
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			/* charge this mbuf to the socket receive buffer */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/* re-order locks: socket lock before TCB lock */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4532 
4533 
4534 
4535 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4536  *************ALTERNATE ROUTING CODE
4537  */
4538 
4539 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4540  *************ALTERNATE ROUTING CODE
4541  */
4542 
4543 struct mbuf *
4544 sctp_generate_invmanparam(int err)
4545 {
4546 	/* Return a MBUF with a invalid mandatory parameter */
4547 	struct mbuf *m;
4548 
4549 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4550 	if (m) {
4551 		struct sctp_paramhdr *ph;
4552 
4553 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4554 		ph = mtod(m, struct sctp_paramhdr *);
4555 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4556 		ph->param_type = htons(err);
4557 	}
4558 	return (m);
4559 }
4560 
4561 #ifdef SCTP_MBCNT_LOGGING
/*
 * Release the output-queue bookkeeping held by chunk tp1: drop the chunk
 * count, the association's total output queue size and, for 1-to-1 style
 * sockets, the socket send-buffer byte count.  All counters are clamped
 * at zero rather than allowed to underflow.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	/* nothing was booked if the chunk carries no data */
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero rather than underflowing the queue size */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* mirror the accounting into the send buffer for 1-to-1 sockets */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}
4593 
4594 #endif
4595 
4596 int
4597 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4598     int reason, int so_locked
4599 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4600     SCTP_UNUSED
4601 #endif
4602 )
4603 {
4604 	struct sctp_stream_out *strq;
4605 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4606 	struct sctp_stream_queue_pending *sp;
4607 	uint16_t stream = 0, seq = 0;
4608 	uint8_t foundeom = 0;
4609 	int ret_sz = 0;
4610 	int notdone;
4611 	int do_wakeup_routine = 0;
4612 
4613 	stream = tp1->rec.data.stream_number;
4614 	seq = tp1->rec.data.stream_seq;
4615 	do {
4616 		ret_sz += tp1->book_size;
4617 		if (tp1->data != NULL) {
4618 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4619 				sctp_flight_size_decrease(tp1);
4620 				sctp_total_flight_decrease(stcb, tp1);
4621 			}
4622 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4623 			stcb->asoc.peers_rwnd += tp1->send_size;
4624 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4625 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4626 			if (tp1->data) {
4627 				sctp_m_freem(tp1->data);
4628 				tp1->data = NULL;
4629 			}
4630 			do_wakeup_routine = 1;
4631 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4632 				stcb->asoc.sent_queue_cnt_removeable--;
4633 			}
4634 		}
4635 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4636 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4637 		    SCTP_DATA_NOT_FRAG) {
4638 			/* not frag'ed we ae done   */
4639 			notdone = 0;
4640 			foundeom = 1;
4641 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4642 			/* end of frag, we are done */
4643 			notdone = 0;
4644 			foundeom = 1;
4645 		} else {
4646 			/*
4647 			 * Its a begin or middle piece, we must mark all of
4648 			 * it
4649 			 */
4650 			notdone = 1;
4651 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4652 		}
4653 	} while (tp1 && notdone);
4654 	if (foundeom == 0) {
4655 		/*
4656 		 * The multi-part message was scattered across the send and
4657 		 * sent queue.
4658 		 */
4659 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4660 			if ((tp1->rec.data.stream_number != stream) ||
4661 			    (tp1->rec.data.stream_seq != seq)) {
4662 				break;
4663 			}
4664 			/*
4665 			 * save to chk in case we have some on stream out
4666 			 * queue. If so and we have an un-transmitted one we
4667 			 * don't have to fudge the TSN.
4668 			 */
4669 			chk = tp1;
4670 			ret_sz += tp1->book_size;
4671 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4672 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4673 			if (tp1->data) {
4674 				sctp_m_freem(tp1->data);
4675 				tp1->data = NULL;
4676 			}
4677 			/* No flight involved here book the size to 0 */
4678 			tp1->book_size = 0;
4679 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4680 				foundeom = 1;
4681 			}
4682 			do_wakeup_routine = 1;
4683 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4684 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4685 			/*
4686 			 * on to the sent queue so we can wait for it to be
4687 			 * passed by.
4688 			 */
4689 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4690 			    sctp_next);
4691 			stcb->asoc.send_queue_cnt--;
4692 			stcb->asoc.sent_queue_cnt++;
4693 		}
4694 	}
4695 	if (foundeom == 0) {
4696 		/*
4697 		 * Still no eom found. That means there is stuff left on the
4698 		 * stream out queue.. yuck.
4699 		 */
4700 		strq = &stcb->asoc.strmout[stream];
4701 		SCTP_TCB_SEND_LOCK(stcb);
4702 		TAILQ_FOREACH(sp, &strq->outqueue, next) {
4703 			/* FIXME: Shouldn't this be a serial number check? */
4704 			if (sp->strseq > seq) {
4705 				break;
4706 			}
4707 			/* Check if its our SEQ */
4708 			if (sp->strseq == seq) {
4709 				sp->discard_rest = 1;
4710 				/*
4711 				 * We may need to put a chunk on the queue
4712 				 * that holds the TSN that would have been
4713 				 * sent with the LAST bit.
4714 				 */
4715 				if (chk == NULL) {
4716 					/* Yep, we have to */
4717 					sctp_alloc_a_chunk(stcb, chk);
4718 					if (chk == NULL) {
4719 						/*
4720 						 * we are hosed. All we can
4721 						 * do is nothing.. which
4722 						 * will cause an abort if
4723 						 * the peer is paying
4724 						 * attention.
4725 						 */
4726 						goto oh_well;
4727 					}
4728 					memset(chk, 0, sizeof(*chk));
4729 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4730 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4731 					chk->asoc = &stcb->asoc;
4732 					chk->rec.data.stream_seq = sp->strseq;
4733 					chk->rec.data.stream_number = sp->stream;
4734 					chk->rec.data.payloadtype = sp->ppid;
4735 					chk->rec.data.context = sp->context;
4736 					chk->flags = sp->act_flags;
4737 					if (sp->net)
4738 						chk->whoTo = sp->net;
4739 					else
4740 						chk->whoTo = stcb->asoc.primary_destination;
4741 					atomic_add_int(&chk->whoTo->ref_count, 1);
4742 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4743 					stcb->asoc.pr_sctp_cnt++;
4744 					chk->pr_sctp_on = 1;
4745 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4746 					stcb->asoc.sent_queue_cnt++;
4747 					stcb->asoc.pr_sctp_cnt++;
4748 				} else {
4749 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4750 				}
4751 		oh_well:
4752 				if (sp->data) {
4753 					/*
4754 					 * Pull any data to free up the SB
4755 					 * and allow sender to "add more"
4756 					 * whilc we will throw away :-)
4757 					 */
4758 					sctp_free_spbufspace(stcb, &stcb->asoc,
4759 					    sp);
4760 					ret_sz += sp->length;
4761 					do_wakeup_routine = 1;
4762 					sp->some_taken = 1;
4763 					sctp_m_freem(sp->data);
4764 					sp->length = 0;
4765 					sp->data = NULL;
4766 					sp->tail_mbuf = NULL;
4767 				}
4768 				break;
4769 			}
4770 		}		/* End tailq_foreach */
4771 		SCTP_TCB_SEND_UNLOCK(stcb);
4772 	}
4773 	if (do_wakeup_routine) {
4774 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4775 		struct socket *so;
4776 
4777 		so = SCTP_INP_SO(stcb->sctp_ep);
4778 		if (!so_locked) {
4779 			atomic_add_int(&stcb->asoc.refcnt, 1);
4780 			SCTP_TCB_UNLOCK(stcb);
4781 			SCTP_SOCKET_LOCK(so, 1);
4782 			SCTP_TCB_LOCK(stcb);
4783 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4784 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4785 				/* assoc was freed while we were unlocked */
4786 				SCTP_SOCKET_UNLOCK(so, 1);
4787 				return (ret_sz);
4788 			}
4789 		}
4790 #endif
4791 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4792 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4793 		if (!so_locked) {
4794 			SCTP_SOCKET_UNLOCK(so, 1);
4795 		}
4796 #endif
4797 	}
4798 	return (ret_sz);
4799 }
4800 
4801 /*
4802  * checks to see if the given address, sa, is one that is currently known by
4803  * the kernel note: can't distinguish the same address on multiple interfaces
4804  * and doesn't handle multiple addresses with different zone/scope id's note:
4805  * ifa_ifwithaddr() compares the entire sockaddr struct
4806  */
4807 struct sctp_ifa *
4808 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4809     int holds_lock)
4810 {
4811 	struct sctp_laddr *laddr;
4812 
4813 	if (holds_lock == 0) {
4814 		SCTP_INP_RLOCK(inp);
4815 	}
4816 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4817 		if (laddr->ifa == NULL)
4818 			continue;
4819 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4820 			continue;
4821 		if (addr->sa_family == AF_INET) {
4822 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4823 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4824 				/* found him. */
4825 				if (holds_lock == 0) {
4826 					SCTP_INP_RUNLOCK(inp);
4827 				}
4828 				return (laddr->ifa);
4829 				break;
4830 			}
4831 		}
4832 #ifdef INET6
4833 		if (addr->sa_family == AF_INET6) {
4834 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4835 			    &laddr->ifa->address.sin6)) {
4836 				/* found him. */
4837 				if (holds_lock == 0) {
4838 					SCTP_INP_RUNLOCK(inp);
4839 				}
4840 				return (laddr->ifa);
4841 				break;
4842 			}
4843 		}
4844 #endif
4845 	}
4846 	if (holds_lock == 0) {
4847 		SCTP_INP_RUNLOCK(inp);
4848 	}
4849 	return (NULL);
4850 }
4851 
4852 uint32_t
4853 sctp_get_ifa_hash_val(struct sockaddr *addr)
4854 {
4855 	if (addr->sa_family == AF_INET) {
4856 		struct sockaddr_in *sin;
4857 
4858 		sin = (struct sockaddr_in *)addr;
4859 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4860 	} else if (addr->sa_family == AF_INET6) {
4861 		struct sockaddr_in6 *sin6;
4862 		uint32_t hash_of_addr;
4863 
4864 		sin6 = (struct sockaddr_in6 *)addr;
4865 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4866 		    sin6->sin6_addr.s6_addr32[1] +
4867 		    sin6->sin6_addr.s6_addr32[2] +
4868 		    sin6->sin6_addr.s6_addr32[3]);
4869 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4870 		return (hash_of_addr);
4871 	}
4872 	return (0);
4873 }
4874 
4875 struct sctp_ifa *
4876 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4877 {
4878 	struct sctp_ifa *sctp_ifap;
4879 	struct sctp_vrf *vrf;
4880 	struct sctp_ifalist *hash_head;
4881 	uint32_t hash_of_addr;
4882 
4883 	if (holds_lock == 0)
4884 		SCTP_IPI_ADDR_RLOCK();
4885 
4886 	vrf = sctp_find_vrf(vrf_id);
4887 	if (vrf == NULL) {
4888 stage_right:
4889 		if (holds_lock == 0)
4890 			SCTP_IPI_ADDR_RUNLOCK();
4891 		return (NULL);
4892 	}
4893 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4894 
4895 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4896 	if (hash_head == NULL) {
4897 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4898 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4899 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4900 		sctp_print_address(addr);
4901 		SCTP_PRINTF("No such bucket for address\n");
4902 		if (holds_lock == 0)
4903 			SCTP_IPI_ADDR_RUNLOCK();
4904 
4905 		return (NULL);
4906 	}
4907 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4908 		if (sctp_ifap == NULL) {
4909 #ifdef INVARIANTS
4910 			panic("Huh LIST_FOREACH corrupt");
4911 			goto stage_right;
4912 #else
4913 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4914 			goto stage_right;
4915 #endif
4916 		}
4917 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4918 			continue;
4919 		if (addr->sa_family == AF_INET) {
4920 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4921 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4922 				/* found him. */
4923 				if (holds_lock == 0)
4924 					SCTP_IPI_ADDR_RUNLOCK();
4925 				return (sctp_ifap);
4926 				break;
4927 			}
4928 		}
4929 #ifdef INET6
4930 		if (addr->sa_family == AF_INET6) {
4931 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4932 			    &sctp_ifap->address.sin6)) {
4933 				/* found him. */
4934 				if (holds_lock == 0)
4935 					SCTP_IPI_ADDR_RUNLOCK();
4936 				return (sctp_ifap);
4937 				break;
4938 			}
4939 		}
4940 #endif
4941 	}
4942 	if (holds_lock == 0)
4943 		SCTP_IPI_ADDR_RUNLOCK();
4944 	return (NULL);
4945 }
4946 
/*
 * The user has consumed *freed_so_far bytes from the socket.  If the
 * receive window has opened by at least rwnd_req, send an immediate
 * window-update SACK and kick chunk output; otherwise just accumulate
 * the freed count for later.  'hold_rlock' indicates the caller holds
 * the INP read lock, which must be dropped around the SACK machinery
 * and retaken before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* hold the assoc so it can't be freed under us */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		if (hold_rlock) {
			/* drop the read lock around SACK/output work */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* retake the read lock if we dropped it above */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5028 
5029 int
5030 sctp_sorecvmsg(struct socket *so,
5031     struct uio *uio,
5032     struct mbuf **mp,
5033     struct sockaddr *from,
5034     int fromlen,
5035     int *msg_flags,
5036     struct sctp_sndrcvinfo *sinfo,
5037     int filling_sinfo)
5038 {
5039 	/*
5040 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5041 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5042 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5043 	 * On the way out we may send out any combination of:
5044 	 * MSG_NOTIFICATION MSG_EOR
5045 	 *
5046 	 */
5047 	struct sctp_inpcb *inp = NULL;
5048 	int my_len = 0;
5049 	int cp_len = 0, error = 0;
5050 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5051 	struct mbuf *m = NULL;
5052 	struct sctp_tcb *stcb = NULL;
5053 	int wakeup_read_socket = 0;
5054 	int freecnt_applied = 0;
5055 	int out_flags = 0, in_flags = 0;
5056 	int block_allowed = 1;
5057 	uint32_t freed_so_far = 0;
5058 	uint32_t copied_so_far = 0;
5059 	int in_eeor_mode = 0;
5060 	int no_rcv_needed = 0;
5061 	uint32_t rwnd_req = 0;
5062 	int hold_sblock = 0;
5063 	int hold_rlock = 0;
5064 	int slen = 0;
5065 	uint32_t held_length = 0;
5066 	int sockbuf_lock = 0;
5067 
5068 	if (uio == NULL) {
5069 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5070 		return (EINVAL);
5071 	}
5072 	if (msg_flags) {
5073 		in_flags = *msg_flags;
5074 		if (in_flags & MSG_PEEK)
5075 			SCTP_STAT_INCR(sctps_read_peeks);
5076 	} else {
5077 		in_flags = 0;
5078 	}
5079 	slen = uio->uio_resid;
5080 
5081 	/* Pull in and set up our int flags */
5082 	if (in_flags & MSG_OOB) {
5083 		/* Out of band's NOT supported */
5084 		return (EOPNOTSUPP);
5085 	}
5086 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5087 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5088 		return (EINVAL);
5089 	}
5090 	if ((in_flags & (MSG_DONTWAIT
5091 	    | MSG_NBIO
5092 	    )) ||
5093 	    SCTP_SO_IS_NBIO(so)) {
5094 		block_allowed = 0;
5095 	}
5096 	/* setup the endpoint */
5097 	inp = (struct sctp_inpcb *)so->so_pcb;
5098 	if (inp == NULL) {
5099 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5100 		return (EFAULT);
5101 	}
5102 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5103 	/* Must be at least a MTU's worth */
5104 	if (rwnd_req < SCTP_MIN_RWND)
5105 		rwnd_req = SCTP_MIN_RWND;
5106 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5107 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5108 		sctp_misc_ints(SCTP_SORECV_ENTER,
5109 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5110 	}
5111 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5112 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5113 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5114 	}
5115 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5116 	sockbuf_lock = 1;
5117 	if (error) {
5118 		goto release_unlocked;
5119 	}
5120 restart:
5121 
5122 
5123 restart_nosblocks:
5124 	if (hold_sblock == 0) {
5125 		SOCKBUF_LOCK(&so->so_rcv);
5126 		hold_sblock = 1;
5127 	}
5128 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5129 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5130 		goto out;
5131 	}
5132 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5133 		if (so->so_error) {
5134 			error = so->so_error;
5135 			if ((in_flags & MSG_PEEK) == 0)
5136 				so->so_error = 0;
5137 			goto out;
5138 		} else {
5139 			if (so->so_rcv.sb_cc == 0) {
5140 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5141 				/* indicate EOF */
5142 				error = 0;
5143 				goto out;
5144 			}
5145 		}
5146 	}
5147 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5148 		/* we need to wait for data */
5149 		if ((so->so_rcv.sb_cc == 0) &&
5150 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5151 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5152 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5153 				/*
5154 				 * For active open side clear flags for
5155 				 * re-use passive open is blocked by
5156 				 * connect.
5157 				 */
5158 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5159 					/*
5160 					 * You were aborted, passive side
5161 					 * always hits here
5162 					 */
5163 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5164 					error = ECONNRESET;
5165 					/*
5166 					 * You get this once if you are
5167 					 * active open side
5168 					 */
5169 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5170 						/*
5171 						 * Remove flag if on the
5172 						 * active open side
5173 						 */
5174 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5175 					}
5176 				}
5177 				so->so_state &= ~(SS_ISCONNECTING |
5178 				    SS_ISDISCONNECTING |
5179 				    SS_ISCONFIRMING |
5180 				    SS_ISCONNECTED);
5181 				if (error == 0) {
5182 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5183 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5184 						error = ENOTCONN;
5185 					} else {
5186 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5187 					}
5188 				}
5189 				goto out;
5190 			}
5191 		}
5192 		error = sbwait(&so->so_rcv);
5193 		if (error) {
5194 			goto out;
5195 		}
5196 		held_length = 0;
5197 		goto restart_nosblocks;
5198 	} else if (so->so_rcv.sb_cc == 0) {
5199 		if (so->so_error) {
5200 			error = so->so_error;
5201 			if ((in_flags & MSG_PEEK) == 0)
5202 				so->so_error = 0;
5203 		} else {
5204 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5205 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5206 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5207 					/*
5208 					 * For active open side clear flags
5209 					 * for re-use passive open is
5210 					 * blocked by connect.
5211 					 */
5212 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5213 						/*
5214 						 * You were aborted, passive
5215 						 * side always hits here
5216 						 */
5217 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5218 						error = ECONNRESET;
5219 						/*
5220 						 * You get this once if you
5221 						 * are active open side
5222 						 */
5223 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5224 							/*
5225 							 * Remove flag if on
5226 							 * the active open
5227 							 * side
5228 							 */
5229 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5230 						}
5231 					}
5232 					so->so_state &= ~(SS_ISCONNECTING |
5233 					    SS_ISDISCONNECTING |
5234 					    SS_ISCONFIRMING |
5235 					    SS_ISCONNECTED);
5236 					if (error == 0) {
5237 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5238 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5239 							error = ENOTCONN;
5240 						} else {
5241 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5242 						}
5243 					}
5244 					goto out;
5245 				}
5246 			}
5247 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5248 			error = EWOULDBLOCK;
5249 		}
5250 		goto out;
5251 	}
5252 	if (hold_sblock == 1) {
5253 		SOCKBUF_UNLOCK(&so->so_rcv);
5254 		hold_sblock = 0;
5255 	}
5256 	/* we possibly have data we can read */
5257 	/* sa_ignore FREED_MEMORY */
5258 	control = TAILQ_FIRST(&inp->read_queue);
5259 	if (control == NULL) {
5260 		/*
5261 		 * This could be happening since the appender did the
5262 		 * increment but as not yet did the tailq insert onto the
5263 		 * read_queue
5264 		 */
5265 		if (hold_rlock == 0) {
5266 			SCTP_INP_READ_LOCK(inp);
5267 			hold_rlock = 1;
5268 		}
5269 		control = TAILQ_FIRST(&inp->read_queue);
5270 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5271 #ifdef INVARIANTS
5272 			panic("Huh, its non zero and nothing on control?");
5273 #endif
5274 			so->so_rcv.sb_cc = 0;
5275 		}
5276 		SCTP_INP_READ_UNLOCK(inp);
5277 		hold_rlock = 0;
5278 		goto restart;
5279 	}
5280 	if ((control->length == 0) &&
5281 	    (control->do_not_ref_stcb)) {
5282 		/*
5283 		 * Clean up code for freeing assoc that left behind a
5284 		 * pdapi.. maybe a peer in EEOR that just closed after
5285 		 * sending and never indicated a EOR.
5286 		 */
5287 		if (hold_rlock == 0) {
5288 			hold_rlock = 1;
5289 			SCTP_INP_READ_LOCK(inp);
5290 		}
5291 		control->held_length = 0;
5292 		if (control->data) {
5293 			/* Hmm there is data here .. fix */
5294 			struct mbuf *m_tmp;
5295 			int cnt = 0;
5296 
5297 			m_tmp = control->data;
5298 			while (m_tmp) {
5299 				cnt += SCTP_BUF_LEN(m_tmp);
5300 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5301 					control->tail_mbuf = m_tmp;
5302 					control->end_added = 1;
5303 				}
5304 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5305 			}
5306 			control->length = cnt;
5307 		} else {
5308 			/* remove it */
5309 			TAILQ_REMOVE(&inp->read_queue, control, next);
5310 			/* Add back any hiddend data */
5311 			sctp_free_remote_addr(control->whoFrom);
5312 			sctp_free_a_readq(stcb, control);
5313 		}
5314 		if (hold_rlock) {
5315 			hold_rlock = 0;
5316 			SCTP_INP_READ_UNLOCK(inp);
5317 		}
5318 		goto restart;
5319 	}
5320 	if ((control->length == 0) &&
5321 	    (control->end_added == 1)) {
5322 		/*
5323 		 * Do we also need to check for (control->pdapi_aborted ==
5324 		 * 1)?
5325 		 */
5326 		if (hold_rlock == 0) {
5327 			hold_rlock = 1;
5328 			SCTP_INP_READ_LOCK(inp);
5329 		}
5330 		TAILQ_REMOVE(&inp->read_queue, control, next);
5331 		if (control->data) {
5332 #ifdef INVARIANTS
5333 			panic("control->data not null but control->length == 0");
5334 #else
5335 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5336 			sctp_m_freem(control->data);
5337 			control->data = NULL;
5338 #endif
5339 		}
5340 		if (control->aux_data) {
5341 			sctp_m_free(control->aux_data);
5342 			control->aux_data = NULL;
5343 		}
5344 		sctp_free_remote_addr(control->whoFrom);
5345 		sctp_free_a_readq(stcb, control);
5346 		if (hold_rlock) {
5347 			hold_rlock = 0;
5348 			SCTP_INP_READ_UNLOCK(inp);
5349 		}
5350 		goto restart;
5351 	}
5352 	if (control->length == 0) {
5353 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5354 		    (filling_sinfo)) {
5355 			/* find a more suitable one then this */
5356 			ctl = TAILQ_NEXT(control, next);
5357 			while (ctl) {
5358 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5359 				    (ctl->some_taken ||
5360 				    (ctl->spec_flags & M_NOTIFICATION) ||
5361 				    ((ctl->do_not_ref_stcb == 0) &&
5362 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5363 				    ) {
5364 					/*-
5365 					 * If we have a different TCB next, and there is data
5366 					 * present. If we have already taken some (pdapi), OR we can
5367 					 * ref the tcb and no delivery as started on this stream, we
5368 					 * take it. Note we allow a notification on a different
5369 					 * assoc to be delivered..
5370 					 */
5371 					control = ctl;
5372 					goto found_one;
5373 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5374 					    (ctl->length) &&
5375 					    ((ctl->some_taken) ||
5376 					    ((ctl->do_not_ref_stcb == 0) &&
5377 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5378 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5379 					/*-
5380 					 * If we have the same tcb, and there is data present, and we
5381 					 * have the strm interleave feature present. Then if we have
5382 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5383 					 * not started a delivery for this stream, we can take it.
5384 					 * Note we do NOT allow a notificaiton on the same assoc to
5385 					 * be delivered.
5386 					 */
5387 					control = ctl;
5388 					goto found_one;
5389 				}
5390 				ctl = TAILQ_NEXT(ctl, next);
5391 			}
5392 		}
5393 		/*
5394 		 * if we reach here, not suitable replacement is available
5395 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5396 		 * into the our held count, and its time to sleep again.
5397 		 */
5398 		held_length = so->so_rcv.sb_cc;
5399 		control->held_length = so->so_rcv.sb_cc;
5400 		goto restart;
5401 	}
5402 	/* Clear the held length since there is something to read */
5403 	control->held_length = 0;
5404 	if (hold_rlock) {
5405 		SCTP_INP_READ_UNLOCK(inp);
5406 		hold_rlock = 0;
5407 	}
5408 found_one:
5409 	/*
5410 	 * If we reach here, control has a some data for us to read off.
5411 	 * Note that stcb COULD be NULL.
5412 	 */
5413 	control->some_taken++;
5414 	if (hold_sblock) {
5415 		SOCKBUF_UNLOCK(&so->so_rcv);
5416 		hold_sblock = 0;
5417 	}
5418 	stcb = control->stcb;
5419 	if (stcb) {
5420 		if ((control->do_not_ref_stcb == 0) &&
5421 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5422 			if (freecnt_applied == 0)
5423 				stcb = NULL;
5424 		} else if (control->do_not_ref_stcb == 0) {
5425 			/* you can't free it on me please */
5426 			/*
5427 			 * The lock on the socket buffer protects us so the
5428 			 * free code will stop. But since we used the
5429 			 * socketbuf lock and the sender uses the tcb_lock
5430 			 * to increment, we need to use the atomic add to
5431 			 * the refcnt
5432 			 */
5433 			if (freecnt_applied) {
5434 #ifdef INVARIANTS
5435 				panic("refcnt already incremented");
5436 #else
5437 				printf("refcnt already incremented?\n");
5438 #endif
5439 			} else {
5440 				atomic_add_int(&stcb->asoc.refcnt, 1);
5441 				freecnt_applied = 1;
5442 			}
5443 			/*
5444 			 * Setup to remember how much we have not yet told
5445 			 * the peer our rwnd has opened up. Note we grab the
5446 			 * value from the tcb from last time. Note too that
5447 			 * sack sending clears this when a sack is sent,
5448 			 * which is fine. Once we hit the rwnd_req, we then
5449 			 * will go to the sctp_user_rcvd() that will not
5450 			 * lock until it KNOWs it MUST send a WUP-SACK.
5451 			 */
5452 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5453 			stcb->freed_by_sorcv_sincelast = 0;
5454 		}
5455 	}
5456 	if (stcb &&
5457 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5458 	    control->do_not_ref_stcb == 0) {
5459 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5460 	}
5461 	/* First lets get off the sinfo and sockaddr info */
5462 	if ((sinfo) && filling_sinfo) {
5463 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5464 		nxt = TAILQ_NEXT(control, next);
5465 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5466 			struct sctp_extrcvinfo *s_extra;
5467 
5468 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5469 			if ((nxt) &&
5470 			    (nxt->length)) {
5471 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5472 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5473 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5474 				}
5475 				if (nxt->spec_flags & M_NOTIFICATION) {
5476 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5477 				}
5478 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5479 				s_extra->sreinfo_next_length = nxt->length;
5480 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5481 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5482 				if (nxt->tail_mbuf != NULL) {
5483 					if (nxt->end_added) {
5484 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5485 					}
5486 				}
5487 			} else {
5488 				/*
5489 				 * we explicitly 0 this, since the memcpy
5490 				 * got some other things beyond the older
5491 				 * sinfo_ that is on the control's structure
5492 				 * :-D
5493 				 */
5494 				nxt = NULL;
5495 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5496 				s_extra->sreinfo_next_aid = 0;
5497 				s_extra->sreinfo_next_length = 0;
5498 				s_extra->sreinfo_next_ppid = 0;
5499 				s_extra->sreinfo_next_stream = 0;
5500 			}
5501 		}
5502 		/*
5503 		 * update off the real current cum-ack, if we have an stcb.
5504 		 */
5505 		if ((control->do_not_ref_stcb == 0) && stcb)
5506 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5507 		/*
5508 		 * mask off the high bits, we keep the actual chunk bits in
5509 		 * there.
5510 		 */
5511 		sinfo->sinfo_flags &= 0x00ff;
5512 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5513 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5514 		}
5515 	}
5516 #ifdef SCTP_ASOCLOG_OF_TSNS
5517 	{
5518 		int index, newindex;
5519 		struct sctp_pcbtsn_rlog *entry;
5520 
5521 		do {
5522 			index = inp->readlog_index;
5523 			newindex = index + 1;
5524 			if (newindex >= SCTP_READ_LOG_SIZE) {
5525 				newindex = 0;
5526 			}
5527 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5528 		entry = &inp->readlog[index];
5529 		entry->vtag = control->sinfo_assoc_id;
5530 		entry->strm = control->sinfo_stream;
5531 		entry->seq = control->sinfo_ssn;
5532 		entry->sz = control->length;
5533 		entry->flgs = control->sinfo_flags;
5534 	}
5535 #endif
5536 	if (fromlen && from) {
5537 		struct sockaddr *to;
5538 
5539 #ifdef INET
5540 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5541 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5542 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5543 #else
5544 		/* No AF_INET use AF_INET6 */
5545 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5546 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5547 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5548 #endif
5549 
5550 		to = from;
5551 #if defined(INET) && defined(INET6)
5552 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5553 		    (to->sa_family == AF_INET) &&
5554 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5555 			struct sockaddr_in *sin;
5556 			struct sockaddr_in6 sin6;
5557 
5558 			sin = (struct sockaddr_in *)to;
5559 			bzero(&sin6, sizeof(sin6));
5560 			sin6.sin6_family = AF_INET6;
5561 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5562 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5563 			bcopy(&sin->sin_addr,
5564 			    &sin6.sin6_addr.s6_addr32[3],
5565 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5566 			sin6.sin6_port = sin->sin_port;
5567 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5568 		}
5569 #endif
5570 #if defined(INET6)
5571 		{
5572 			struct sockaddr_in6 lsa6, *to6;
5573 
5574 			to6 = (struct sockaddr_in6 *)to;
5575 			sctp_recover_scope_mac(to6, (&lsa6));
5576 		}
5577 #endif
5578 	}
5579 	/* now copy out what data we can */
5580 	if (mp == NULL) {
5581 		/* copy out each mbuf in the chain up to length */
5582 get_more_data:
5583 		m = control->data;
5584 		while (m) {
5585 			/* Move out all we can */
5586 			cp_len = (int)uio->uio_resid;
5587 			my_len = (int)SCTP_BUF_LEN(m);
5588 			if (cp_len > my_len) {
5589 				/* not enough in this buf */
5590 				cp_len = my_len;
5591 			}
5592 			if (hold_rlock) {
5593 				SCTP_INP_READ_UNLOCK(inp);
5594 				hold_rlock = 0;
5595 			}
5596 			if (cp_len > 0)
5597 				error = uiomove(mtod(m, char *), cp_len, uio);
5598 			/* re-read */
5599 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5600 				goto release;
5601 			}
5602 			if ((control->do_not_ref_stcb == 0) && stcb &&
5603 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5604 				no_rcv_needed = 1;
5605 			}
5606 			if (error) {
5607 				/* error we are out of here */
5608 				goto release;
5609 			}
5610 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5611 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5612 			    ((control->end_added == 0) ||
5613 			    (control->end_added &&
5614 			    (TAILQ_NEXT(control, next) == NULL)))
5615 			    ) {
5616 				SCTP_INP_READ_LOCK(inp);
5617 				hold_rlock = 1;
5618 			}
5619 			if (cp_len == SCTP_BUF_LEN(m)) {
5620 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5621 				    (control->end_added)) {
5622 					out_flags |= MSG_EOR;
5623 					if ((control->do_not_ref_stcb == 0) &&
5624 					    (control->stcb != NULL) &&
5625 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5626 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5627 				}
5628 				if (control->spec_flags & M_NOTIFICATION) {
5629 					out_flags |= MSG_NOTIFICATION;
5630 				}
5631 				/* we ate up the mbuf */
5632 				if (in_flags & MSG_PEEK) {
5633 					/* just looking */
5634 					m = SCTP_BUF_NEXT(m);
5635 					copied_so_far += cp_len;
5636 				} else {
5637 					/* dispose of the mbuf */
5638 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5639 						sctp_sblog(&so->so_rcv,
5640 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5641 					}
5642 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5643 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5644 						sctp_sblog(&so->so_rcv,
5645 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5646 					}
5647 					copied_so_far += cp_len;
5648 					freed_so_far += cp_len;
5649 					freed_so_far += MSIZE;
5650 					atomic_subtract_int(&control->length, cp_len);
5651 					control->data = sctp_m_free(m);
5652 					m = control->data;
5653 					/*
5654 					 * been through it all, must hold sb
5655 					 * lock ok to null tail
5656 					 */
5657 					if (control->data == NULL) {
5658 #ifdef INVARIANTS
5659 						if ((control->end_added == 0) ||
5660 						    (TAILQ_NEXT(control, next) == NULL)) {
5661 							/*
5662 							 * If the end is not
5663 							 * added, OR the
5664 							 * next is NOT null
5665 							 * we MUST have the
5666 							 * lock.
5667 							 */
5668 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5669 								panic("Hmm we don't own the lock?");
5670 							}
5671 						}
5672 #endif
5673 						control->tail_mbuf = NULL;
5674 #ifdef INVARIANTS
5675 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5676 							panic("end_added, nothing left and no MSG_EOR");
5677 						}
5678 #endif
5679 					}
5680 				}
5681 			} else {
5682 				/* Do we need to trim the mbuf? */
5683 				if (control->spec_flags & M_NOTIFICATION) {
5684 					out_flags |= MSG_NOTIFICATION;
5685 				}
5686 				if ((in_flags & MSG_PEEK) == 0) {
5687 					SCTP_BUF_RESV_UF(m, cp_len);
5688 					SCTP_BUF_LEN(m) -= cp_len;
5689 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5690 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5691 					}
5692 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5693 					if ((control->do_not_ref_stcb == 0) &&
5694 					    stcb) {
5695 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5696 					}
5697 					copied_so_far += cp_len;
5698 					freed_so_far += cp_len;
5699 					freed_so_far += MSIZE;
5700 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5701 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5702 						    SCTP_LOG_SBRESULT, 0);
5703 					}
5704 					atomic_subtract_int(&control->length, cp_len);
5705 				} else {
5706 					copied_so_far += cp_len;
5707 				}
5708 			}
5709 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5710 				break;
5711 			}
5712 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5713 			    (control->do_not_ref_stcb == 0) &&
5714 			    (freed_so_far >= rwnd_req)) {
5715 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5716 			}
5717 		}		/* end while(m) */
5718 		/*
5719 		 * At this point we have looked at it all and we either have
5720 		 * a MSG_EOR/or read all the user wants... <OR>
5721 		 * control->length == 0.
5722 		 */
5723 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5724 			/* we are done with this control */
5725 			if (control->length == 0) {
5726 				if (control->data) {
5727 #ifdef INVARIANTS
5728 					panic("control->data not null at read eor?");
5729 #else
5730 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5731 					sctp_m_freem(control->data);
5732 					control->data = NULL;
5733 #endif
5734 				}
5735 		done_with_control:
5736 				if (TAILQ_NEXT(control, next) == NULL) {
5737 					/*
5738 					 * If we don't have a next we need a
5739 					 * lock, if there is a next
5740 					 * interrupt is filling ahead of us
5741 					 * and we don't need a lock to
5742 					 * remove this guy (which is the
5743 					 * head of the queue).
5744 					 */
5745 					if (hold_rlock == 0) {
5746 						SCTP_INP_READ_LOCK(inp);
5747 						hold_rlock = 1;
5748 					}
5749 				}
5750 				TAILQ_REMOVE(&inp->read_queue, control, next);
5751 				/* Add back any hiddend data */
5752 				if (control->held_length) {
5753 					held_length = 0;
5754 					control->held_length = 0;
5755 					wakeup_read_socket = 1;
5756 				}
5757 				if (control->aux_data) {
5758 					sctp_m_free(control->aux_data);
5759 					control->aux_data = NULL;
5760 				}
5761 				no_rcv_needed = control->do_not_ref_stcb;
5762 				sctp_free_remote_addr(control->whoFrom);
5763 				control->data = NULL;
5764 				sctp_free_a_readq(stcb, control);
5765 				control = NULL;
5766 				if ((freed_so_far >= rwnd_req) &&
5767 				    (no_rcv_needed == 0))
5768 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5769 
5770 			} else {
5771 				/*
5772 				 * The user did not read all of this
5773 				 * message, turn off the returned MSG_EOR
5774 				 * since we are leaving more behind on the
5775 				 * control to read.
5776 				 */
5777 #ifdef INVARIANTS
5778 				if (control->end_added &&
5779 				    (control->data == NULL) &&
5780 				    (control->tail_mbuf == NULL)) {
5781 					panic("Gak, control->length is corrupt?");
5782 				}
5783 #endif
5784 				no_rcv_needed = control->do_not_ref_stcb;
5785 				out_flags &= ~MSG_EOR;
5786 			}
5787 		}
5788 		if (out_flags & MSG_EOR) {
5789 			goto release;
5790 		}
5791 		if ((uio->uio_resid == 0) ||
5792 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5793 		    ) {
5794 			goto release;
5795 		}
5796 		/*
5797 		 * If I hit here the receiver wants more and this message is
5798 		 * NOT done (pd-api). So two questions. Can we block? if not
5799 		 * we are done. Did the user NOT set MSG_WAITALL?
5800 		 */
5801 		if (block_allowed == 0) {
5802 			goto release;
5803 		}
5804 		/*
5805 		 * We need to wait for more data a few things: - We don't
5806 		 * sbunlock() so we don't get someone else reading. - We
5807 		 * must be sure to account for the case where what is added
5808 		 * is NOT to our control when we wakeup.
5809 		 */
5810 
5811 		/*
5812 		 * Do we need to tell the transport a rwnd update might be
5813 		 * needed before we go to sleep?
5814 		 */
5815 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5816 		    ((freed_so_far >= rwnd_req) &&
5817 		    (control->do_not_ref_stcb == 0) &&
5818 		    (no_rcv_needed == 0))) {
5819 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5820 		}
5821 wait_some_more:
5822 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5823 			goto release;
5824 		}
5825 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5826 			goto release;
5827 
5828 		if (hold_rlock == 1) {
5829 			SCTP_INP_READ_UNLOCK(inp);
5830 			hold_rlock = 0;
5831 		}
5832 		if (hold_sblock == 0) {
5833 			SOCKBUF_LOCK(&so->so_rcv);
5834 			hold_sblock = 1;
5835 		}
5836 		if ((copied_so_far) && (control->length == 0) &&
5837 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5838 			goto release;
5839 		}
5840 		if (so->so_rcv.sb_cc <= control->held_length) {
5841 			error = sbwait(&so->so_rcv);
5842 			if (error) {
5843 				goto release;
5844 			}
5845 			control->held_length = 0;
5846 		}
5847 		if (hold_sblock) {
5848 			SOCKBUF_UNLOCK(&so->so_rcv);
5849 			hold_sblock = 0;
5850 		}
5851 		if (control->length == 0) {
5852 			/* still nothing here */
5853 			if (control->end_added == 1) {
5854 				/* he aborted, or is done i.e.did a shutdown */
5855 				out_flags |= MSG_EOR;
5856 				if (control->pdapi_aborted) {
5857 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5858 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5859 
5860 					out_flags |= MSG_TRUNC;
5861 				} else {
5862 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5863 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5864 				}
5865 				goto done_with_control;
5866 			}
5867 			if (so->so_rcv.sb_cc > held_length) {
5868 				control->held_length = so->so_rcv.sb_cc;
5869 				held_length = 0;
5870 			}
5871 			goto wait_some_more;
5872 		} else if (control->data == NULL) {
5873 			/*
5874 			 * we must re-sync since data is probably being
5875 			 * added
5876 			 */
5877 			SCTP_INP_READ_LOCK(inp);
5878 			if ((control->length > 0) && (control->data == NULL)) {
5879 				/*
5880 				 * big trouble.. we have the lock and its
5881 				 * corrupt?
5882 				 */
5883 #ifdef INVARIANTS
5884 				panic("Impossible data==NULL length !=0");
5885 #endif
5886 				out_flags |= MSG_EOR;
5887 				out_flags |= MSG_TRUNC;
5888 				control->length = 0;
5889 				SCTP_INP_READ_UNLOCK(inp);
5890 				goto done_with_control;
5891 			}
5892 			SCTP_INP_READ_UNLOCK(inp);
5893 			/* We will fall around to get more data */
5894 		}
5895 		goto get_more_data;
5896 	} else {
5897 		/*-
5898 		 * Give caller back the mbuf chain,
5899 		 * store in uio_resid the length
5900 		 */
5901 		wakeup_read_socket = 0;
5902 		if ((control->end_added == 0) ||
5903 		    (TAILQ_NEXT(control, next) == NULL)) {
5904 			/* Need to get rlock */
5905 			if (hold_rlock == 0) {
5906 				SCTP_INP_READ_LOCK(inp);
5907 				hold_rlock = 1;
5908 			}
5909 		}
5910 		if (control->end_added) {
5911 			out_flags |= MSG_EOR;
5912 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5913 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5914 		}
5915 		if (control->spec_flags & M_NOTIFICATION) {
5916 			out_flags |= MSG_NOTIFICATION;
5917 		}
5918 		uio->uio_resid = control->length;
5919 		*mp = control->data;
5920 		m = control->data;
5921 		while (m) {
5922 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5923 				sctp_sblog(&so->so_rcv,
5924 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5925 			}
5926 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5927 			freed_so_far += SCTP_BUF_LEN(m);
5928 			freed_so_far += MSIZE;
5929 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5930 				sctp_sblog(&so->so_rcv,
5931 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5932 			}
5933 			m = SCTP_BUF_NEXT(m);
5934 		}
5935 		control->data = control->tail_mbuf = NULL;
5936 		control->length = 0;
5937 		if (out_flags & MSG_EOR) {
5938 			/* Done with this control */
5939 			goto done_with_control;
5940 		}
5941 	}
5942 release:
5943 	if (hold_rlock == 1) {
5944 		SCTP_INP_READ_UNLOCK(inp);
5945 		hold_rlock = 0;
5946 	}
5947 	if (hold_sblock == 1) {
5948 		SOCKBUF_UNLOCK(&so->so_rcv);
5949 		hold_sblock = 0;
5950 	}
5951 	sbunlock(&so->so_rcv);
5952 	sockbuf_lock = 0;
5953 
5954 release_unlocked:
5955 	if (hold_sblock) {
5956 		SOCKBUF_UNLOCK(&so->so_rcv);
5957 		hold_sblock = 0;
5958 	}
5959 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5960 		if ((freed_so_far >= rwnd_req) &&
5961 		    (control && (control->do_not_ref_stcb == 0)) &&
5962 		    (no_rcv_needed == 0))
5963 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5964 	}
5965 out:
5966 	if (msg_flags) {
5967 		*msg_flags = out_flags;
5968 	}
5969 	if (((out_flags & MSG_EOR) == 0) &&
5970 	    ((in_flags & MSG_PEEK) == 0) &&
5971 	    (sinfo) &&
5972 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
5973 		struct sctp_extrcvinfo *s_extra;
5974 
5975 		s_extra = (struct sctp_extrcvinfo *)sinfo;
5976 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5977 	}
5978 	if (hold_rlock == 1) {
5979 		SCTP_INP_READ_UNLOCK(inp);
5980 		hold_rlock = 0;
5981 	}
5982 	if (hold_sblock) {
5983 		SOCKBUF_UNLOCK(&so->so_rcv);
5984 		hold_sblock = 0;
5985 	}
5986 	if (sockbuf_lock) {
5987 		sbunlock(&so->so_rcv);
5988 	}
5989 	if (freecnt_applied) {
5990 		/*
5991 		 * The lock on the socket buffer protects us so the free
5992 		 * code will stop. But since we used the socketbuf lock and
5993 		 * the sender uses the tcb_lock to increment, we need to use
5994 		 * the atomic add to the refcnt.
5995 		 */
5996 		if (stcb == NULL) {
5997 #ifdef INVARIANTS
5998 			panic("stcb for refcnt has gone NULL?");
5999 			goto stage_left;
6000 #else
6001 			goto stage_left;
6002 #endif
6003 		}
6004 		atomic_add_int(&stcb->asoc.refcnt, -1);
6005 		freecnt_applied = 0;
6006 		/* Save the value back for next time */
6007 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6008 	}
6009 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6010 		if (stcb) {
6011 			sctp_misc_ints(SCTP_SORECV_DONE,
6012 			    freed_so_far,
6013 			    ((uio) ? (slen - uio->uio_resid) : slen),
6014 			    stcb->asoc.my_rwnd,
6015 			    so->so_rcv.sb_cc);
6016 		} else {
6017 			sctp_misc_ints(SCTP_SORECV_DONE,
6018 			    freed_so_far,
6019 			    ((uio) ? (slen - uio->uio_resid) : slen),
6020 			    0,
6021 			    so->so_rcv.sb_cc);
6022 		}
6023 	}
6024 stage_left:
6025 	if (wakeup_read_socket) {
6026 		sctp_sorwakeup(inp, so);
6027 	}
6028 	return (error);
6029 }
6030 
6031 
6032 #ifdef SCTP_MBUF_LOGGING
6033 struct mbuf *
6034 sctp_m_free(struct mbuf *m)
6035 {
6036 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6037 		if (SCTP_BUF_IS_EXTENDED(m)) {
6038 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6039 		}
6040 	}
6041 	return (m_free(m));
6042 }
6043 
6044 void
6045 sctp_m_freem(struct mbuf *mb)
6046 {
6047 	while (mb != NULL)
6048 		mb = sctp_m_free(mb);
6049 }
6050 
6051 #endif
6052 
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 *
	 * The request is not performed here: a work item is queued on the
	 * address work queue and the ADDR_WQ timer is started so the
	 * iterator processes it asynchronously.
	 *
	 * Returns 0 on success, EADDRNOTAVAIL if the address is not a
	 * known local ifa, or ENOMEM if the work item cannot be allocated.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and init the wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* the work item holds its own reference on the ifa */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6099 
6100 
6101 int
6102 sctp_soreceive(struct socket *so,
6103     struct sockaddr **psa,
6104     struct uio *uio,
6105     struct mbuf **mp0,
6106     struct mbuf **controlp,
6107     int *flagsp)
6108 {
6109 	int error, fromlen;
6110 	uint8_t sockbuf[256];
6111 	struct sockaddr *from;
6112 	struct sctp_extrcvinfo sinfo;
6113 	int filling_sinfo = 1;
6114 	struct sctp_inpcb *inp;
6115 
6116 	inp = (struct sctp_inpcb *)so->so_pcb;
6117 	/* pickup the assoc we are reading from */
6118 	if (inp == NULL) {
6119 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6120 		return (EINVAL);
6121 	}
6122 	if ((sctp_is_feature_off(inp,
6123 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6124 	    (controlp == NULL)) {
6125 		/* user does not want the sndrcv ctl */
6126 		filling_sinfo = 0;
6127 	}
6128 	if (psa) {
6129 		from = (struct sockaddr *)sockbuf;
6130 		fromlen = sizeof(sockbuf);
6131 		from->sa_len = 0;
6132 	} else {
6133 		from = NULL;
6134 		fromlen = 0;
6135 	}
6136 
6137 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6138 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6139 	if ((controlp) && (filling_sinfo)) {
6140 		/* copy back the sinfo in a CMSG format */
6141 		if (filling_sinfo)
6142 			*controlp = sctp_build_ctl_nchunk(inp,
6143 			    (struct sctp_sndrcvinfo *)&sinfo);
6144 		else
6145 			*controlp = NULL;
6146 	}
6147 	if (psa) {
6148 		/* copy back the address info */
6149 		if (from && from->sa_len) {
6150 			*psa = sodupsockaddr(from, M_NOWAIT);
6151 		} else {
6152 			*psa = NULL;
6153 		}
6154 	}
6155 	return (error);
6156 }
6157 
6158 
6159 int
6160 sctp_l_soreceive(struct socket *so,
6161     struct sockaddr **name,
6162     struct uio *uio,
6163     char **controlp,
6164     int *controllen,
6165     int *flag)
6166 {
6167 	int error, fromlen;
6168 	uint8_t sockbuf[256];
6169 	struct sockaddr *from;
6170 	struct sctp_extrcvinfo sinfo;
6171 	int filling_sinfo = 1;
6172 	struct sctp_inpcb *inp;
6173 
6174 	inp = (struct sctp_inpcb *)so->so_pcb;
6175 	/* pickup the assoc we are reading from */
6176 	if (inp == NULL) {
6177 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6178 		return (EINVAL);
6179 	}
6180 	if ((sctp_is_feature_off(inp,
6181 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6182 	    (controlp == NULL)) {
6183 		/* user does not want the sndrcv ctl */
6184 		filling_sinfo = 0;
6185 	}
6186 	if (name) {
6187 		from = (struct sockaddr *)sockbuf;
6188 		fromlen = sizeof(sockbuf);
6189 		from->sa_len = 0;
6190 	} else {
6191 		from = NULL;
6192 		fromlen = 0;
6193 	}
6194 
6195 	error = sctp_sorecvmsg(so, uio,
6196 	    (struct mbuf **)NULL,
6197 	    from, fromlen, flag,
6198 	    (struct sctp_sndrcvinfo *)&sinfo,
6199 	    filling_sinfo);
6200 	if ((controlp) && (filling_sinfo)) {
6201 		/*
6202 		 * copy back the sinfo in a CMSG format note that the caller
6203 		 * has reponsibility for freeing the memory.
6204 		 */
6205 		if (filling_sinfo)
6206 			*controlp = sctp_build_ctl_cchunk(inp,
6207 			    controllen,
6208 			    (struct sctp_sndrcvinfo *)&sinfo);
6209 	}
6210 	if (name) {
6211 		/* copy back the address info */
6212 		if (from && from->sa_len) {
6213 			*name = sodupsockaddr(from, M_WAIT);
6214 		} else {
6215 			*name = NULL;
6216 		}
6217 	}
6218 	return (error);
6219 }
6220 
6221 
6222 
6223 
6224 
6225 
6226 
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add the packed list of totaddr sockaddrs starting at addr as
	 * remote addresses of stcb.  On failure the association has been
	 * freed (so no unlock is needed), *error is set to ENOBUFS, and
	 * the number of addresses successfully added is returned.
	 *
	 * NOTE(review): an entry whose family is neither AF_INET nor
	 * AF_INET6 leaves incr unchanged (0 on the first iteration), so
	 * sa does not advance and the same bytes are re-examined.  The
	 * caller is expected to have pre-validated the list
	 * (sctp_connectx_helper_find() truncates totaddr at the first
	 * unknown family) -- confirm before feeding raw input here.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/* advance to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6267 
6268 struct sctp_tcb *
6269 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6270     int *totaddr, int *num_v4, int *num_v6, int *error,
6271     int limit, int *bad_addr)
6272 {
6273 	struct sockaddr *sa;
6274 	struct sctp_tcb *stcb = NULL;
6275 	size_t incr, at, i;
6276 
6277 	at = incr = 0;
6278 	sa = addr;
6279 	*error = *num_v6 = *num_v4 = 0;
6280 	/* account and validate addresses */
6281 	for (i = 0; i < (size_t)*totaddr; i++) {
6282 		if (sa->sa_family == AF_INET) {
6283 			(*num_v4) += 1;
6284 			incr = sizeof(struct sockaddr_in);
6285 			if (sa->sa_len != incr) {
6286 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6287 				*error = EINVAL;
6288 				*bad_addr = 1;
6289 				return (NULL);
6290 			}
6291 		} else if (sa->sa_family == AF_INET6) {
6292 			struct sockaddr_in6 *sin6;
6293 
6294 			sin6 = (struct sockaddr_in6 *)sa;
6295 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6296 				/* Must be non-mapped for connectx */
6297 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6298 				*error = EINVAL;
6299 				*bad_addr = 1;
6300 				return (NULL);
6301 			}
6302 			(*num_v6) += 1;
6303 			incr = sizeof(struct sockaddr_in6);
6304 			if (sa->sa_len != incr) {
6305 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6306 				*error = EINVAL;
6307 				*bad_addr = 1;
6308 				return (NULL);
6309 			}
6310 		} else {
6311 			*totaddr = i;
6312 			/* we are done */
6313 			break;
6314 		}
6315 		SCTP_INP_INCR_REF(inp);
6316 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6317 		if (stcb != NULL) {
6318 			/* Already have or am bring up an association */
6319 			return (stcb);
6320 		} else {
6321 			SCTP_INP_DECR_REF(inp);
6322 		}
6323 		if ((at + incr) > (size_t)limit) {
6324 			*totaddr = i;
6325 			break;
6326 		}
6327 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6328 	}
6329 	return ((struct sctp_tcb *)NULL);
6330 }
6331 
6332 /*
6333  * sctp_bindx(ADD) for one address.
6334  * assumes all arguments are valid/checked by caller.
6335  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * sctp_bindx(ADD) for one address: validate the address against
	 * the socket's family/v6-only constraints, then either perform
	 * the initial bind (if the endpoint is still unbound) or add the
	 * address to an already-bound endpoint via sctp_addr_mgmt_ep_sa().
	 * Failures are reported through *error; success leaves it at 0.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the mapped address to plain IPv4 for binding */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* first address on an unbound endpoint: do the real bind */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* check whether another endpoint already owns this addr:port */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6457 
6458 /*
6459  * sctp_bindx(DELETE) for one address.
6460  * assumes all arguments are valid/checked by caller.
6461  */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * sctp_bindx(DELETE) for one address: validate the address
	 * against the socket's family/v6-only constraints, then remove it
	 * from the endpoint via sctp_addr_mgmt_ep_sa().  Failures are
	 * reported through *error; success leaves it at 0.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* convert the mapped address to plain IPv4 */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6540 
6541 /*
6542  * returns the valid local address count for an assoc, taking into account
6543  * all scoping rules
6544  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the local addresses usable by this association, applying
	 * the association's scoping rules (loopback, IPv4-private,
	 * link-local, site-local) and the endpoint's address-family
	 * restrictions.  Walks the VRF ifn/ifa lists in the bound-all
	 * case, otherwise the endpoint's explicit address list.
	 */
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		/* a v6 socket also accepts v4 unless it is v6-only */
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad
									 * link-local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6676 
6677 #if defined(SCTP_LOCAL_TRACE_BUF)
6678 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Record one entry in the circular in-kernel trace buffer.  A
	 * slot is reserved lock-free: the CAS loop advances the shared
	 * index (wrapping SCTP_MAX_LOGGING_SIZE -> 1) and retries until
	 * no other CPU raced us, then the claimed slot is filled in.
	 */
	uint32_t saveindex, newindex;

	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* a saved index at the limit means we claimed the wrap slot 0 */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6704 
6705 #endif
/*
 * We will need to add support for binding the ports and such here
 * so that we can do UDP tunneling.  In the meantime, we return an
 * error.
 */
6711 #include <netinet/udp.h>
6712 #include <netinet/udp_var.h>
6713 #include <sys/proc.h>
6714 #ifdef INET6
6715 #include <netinet6/sctp6_var.h>
6716 #endif
6717 
6718 static void
6719 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6720 {
6721 	struct ip *iph;
6722 	struct mbuf *sp, *last;
6723 	struct udphdr *uhdr;
6724 	uint16_t port = 0, len;
6725 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6726 
6727 	/*
6728 	 * Split out the mbuf chain. Leave the IP header in m, place the
6729 	 * rest in the sp.
6730 	 */
6731 	if ((m->m_flags & M_PKTHDR) == 0) {
6732 		/* Can't handle one that is not a pkt hdr */
6733 		goto out;
6734 	}
6735 	/* pull the src port */
6736 	iph = mtod(m, struct ip *);
6737 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6738 
6739 	port = uhdr->uh_sport;
6740 	sp = m_split(m, off, M_DONTWAIT);
6741 	if (sp == NULL) {
6742 		/* Gak, drop packet, we can't do a split */
6743 		goto out;
6744 	}
6745 	if (sp->m_pkthdr.len < header_size) {
6746 		/* Gak, packet can't have an SCTP header in it - to small */
6747 		m_freem(sp);
6748 		goto out;
6749 	}
6750 	/* ok now pull up the UDP header and SCTP header together */
6751 	sp = m_pullup(sp, header_size);
6752 	if (sp == NULL) {
6753 		/* Gak pullup failed */
6754 		goto out;
6755 	}
6756 	/* trim out the UDP header */
6757 	m_adj(sp, sizeof(struct udphdr));
6758 
6759 	/* Now reconstruct the mbuf chain */
6760 	/* 1) find last one */
6761 	last = m;
6762 	while (last->m_next != NULL) {
6763 		last = last->m_next;
6764 	}
6765 	last->m_next = sp;
6766 	m->m_pkthdr.len += sp->m_pkthdr.len;
6767 	last = m;
6768 	while (last != NULL) {
6769 		last = last->m_next;
6770 	}
6771 	/* Now its ready for sctp_input or sctp6_input */
6772 	iph = mtod(m, struct ip *);
6773 	switch (iph->ip_v) {
6774 	case IPVERSION:
6775 		{
6776 			/* its IPv4 */
6777 			len = SCTP_GET_IPV4_LENGTH(iph);
6778 			len -= sizeof(struct udphdr);
6779 			SCTP_GET_IPV4_LENGTH(iph) = len;
6780 			sctp_input_with_port(m, off, port);
6781 			break;
6782 		}
6783 #ifdef INET6
6784 	case IPV6_VERSION >> 4:
6785 		{
6786 			/* its IPv6 - NOT supported */
6787 			goto out;
6788 			break;
6789 
6790 		}
6791 #endif
6792 	default:
6793 		{
6794 			m_freem(m);
6795 			break;
6796 		}
6797 	}
6798 	return;
6799 out:
6800 	m_freem(m);
6801 }
6802 
6803 void
6804 sctp_over_udp_stop(void)
6805 {
6806 	struct socket *sop;
6807 
6808 	/*
6809 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6810 	 * for writting!
6811 	 */
6812 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6813 		/* Nothing to do */
6814 		return;
6815 	}
6816 	sop = SCTP_BASE_INFO(udp_tun_socket);
6817 	soclose(sop);
6818 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6819 }
int
sctp_over_udp_start(void)
{
	/*
	 * Create the kernel UDP socket used for SCTP-over-UDP tunneling,
	 * install sctp_recv_udp_tunneled_packet() as its input hook, and
	 * bind it to the sysctl-configured tunneling port.  Returns 0 on
	 * success or an errno value; on failure any partially created
	 * socket is torn down via sctp_over_udp_stop().
	 *
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	/* publish before the hook/bind so sctp_over_udp_stop() can clean up */
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up, we can't get the port */
exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_udp_tunneled_packet().
	 */
	return (0);
}
6873