xref: /freebsd/sys/netinet/sctputil.c (revision 145992504973bd16cf3518af9ba5ce185fefa82a)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_bsd_addr.h>
51 
52 
#ifndef KTR_SCTP
/* Fall back to the generic subsystem KTR class if no SCTP class is defined. */
#define KTR_SCTP KTR_SUBSYS
#endif

/* Pluggable congestion-control and stream-scheduler module tables (defined elsewhere). */
extern struct sctp_cc_functions sctp_cc_functions[];
extern struct sctp_ss_functions sctp_ss_functions[];
59 
60 void
61 sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
62 {
63 	struct sctp_cwnd_log sctp_clog;
64 
65 	sctp_clog.x.sb.stcb = stcb;
66 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
67 	if (stcb)
68 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
69 	else
70 		sctp_clog.x.sb.stcb_sbcc = 0;
71 	sctp_clog.x.sb.incr = incr;
72 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
73 	    SCTP_LOG_EVENT_SB,
74 	    from,
75 	    sctp_clog.x.misc.log1,
76 	    sctp_clog.x.misc.log2,
77 	    sctp_clog.x.misc.log3,
78 	    sctp_clog.x.misc.log4);
79 }
80 
81 void
82 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
83 {
84 	struct sctp_cwnd_log sctp_clog;
85 
86 	sctp_clog.x.close.inp = (void *)inp;
87 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
88 	if (stcb) {
89 		sctp_clog.x.close.stcb = (void *)stcb;
90 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
91 	} else {
92 		sctp_clog.x.close.stcb = 0;
93 		sctp_clog.x.close.state = 0;
94 	}
95 	sctp_clog.x.close.loc = loc;
96 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
97 	    SCTP_LOG_EVENT_CLOSE,
98 	    0,
99 	    sctp_clog.x.misc.log1,
100 	    sctp_clog.x.misc.log2,
101 	    sctp_clog.x.misc.log3,
102 	    sctp_clog.x.misc.log4);
103 }
104 
105 void
106 rto_logging(struct sctp_nets *net, int from)
107 {
108 	struct sctp_cwnd_log sctp_clog;
109 
110 	memset(&sctp_clog, 0, sizeof(sctp_clog));
111 	sctp_clog.x.rto.net = (void *)net;
112 	sctp_clog.x.rto.rtt = net->rtt / 1000;
113 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
114 	    SCTP_LOG_EVENT_RTT,
115 	    from,
116 	    sctp_clog.x.misc.log1,
117 	    sctp_clog.x.misc.log2,
118 	    sctp_clog.x.misc.log3,
119 	    sctp_clog.x.misc.log4);
120 }
121 
122 void
123 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
124 {
125 	struct sctp_cwnd_log sctp_clog;
126 
127 	sctp_clog.x.strlog.stcb = stcb;
128 	sctp_clog.x.strlog.n_tsn = tsn;
129 	sctp_clog.x.strlog.n_sseq = sseq;
130 	sctp_clog.x.strlog.e_tsn = 0;
131 	sctp_clog.x.strlog.e_sseq = 0;
132 	sctp_clog.x.strlog.strm = stream;
133 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
134 	    SCTP_LOG_EVENT_STRM,
135 	    from,
136 	    sctp_clog.x.misc.log1,
137 	    sctp_clog.x.misc.log2,
138 	    sctp_clog.x.misc.log3,
139 	    sctp_clog.x.misc.log4);
140 }
141 
142 void
143 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
144 {
145 	struct sctp_cwnd_log sctp_clog;
146 
147 	sctp_clog.x.nagle.stcb = (void *)stcb;
148 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
149 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
150 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
151 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
152 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
153 	    SCTP_LOG_EVENT_NAGLE,
154 	    action,
155 	    sctp_clog.x.misc.log1,
156 	    sctp_clog.x.misc.log2,
157 	    sctp_clog.x.misc.log3,
158 	    sctp_clog.x.misc.log4);
159 }
160 
161 void
162 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
163 {
164 	struct sctp_cwnd_log sctp_clog;
165 
166 	sctp_clog.x.sack.cumack = cumack;
167 	sctp_clog.x.sack.oldcumack = old_cumack;
168 	sctp_clog.x.sack.tsn = tsn;
169 	sctp_clog.x.sack.numGaps = gaps;
170 	sctp_clog.x.sack.numDups = dups;
171 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
172 	    SCTP_LOG_EVENT_SACK,
173 	    from,
174 	    sctp_clog.x.misc.log1,
175 	    sctp_clog.x.misc.log2,
176 	    sctp_clog.x.misc.log3,
177 	    sctp_clog.x.misc.log4);
178 }
179 
180 void
181 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
182 {
183 	struct sctp_cwnd_log sctp_clog;
184 
185 	memset(&sctp_clog, 0, sizeof(sctp_clog));
186 	sctp_clog.x.map.base = map;
187 	sctp_clog.x.map.cum = cum;
188 	sctp_clog.x.map.high = high;
189 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
190 	    SCTP_LOG_EVENT_MAP,
191 	    from,
192 	    sctp_clog.x.misc.log1,
193 	    sctp_clog.x.misc.log2,
194 	    sctp_clog.x.misc.log3,
195 	    sctp_clog.x.misc.log4);
196 }
197 
198 void
199 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
200 {
201 	struct sctp_cwnd_log sctp_clog;
202 
203 	memset(&sctp_clog, 0, sizeof(sctp_clog));
204 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
205 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
206 	sctp_clog.x.fr.tsn = tsn;
207 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
208 	    SCTP_LOG_EVENT_FR,
209 	    from,
210 	    sctp_clog.x.misc.log1,
211 	    sctp_clog.x.misc.log2,
212 	    sctp_clog.x.misc.log3,
213 	    sctp_clog.x.misc.log4);
214 }
215 
216 void
217 sctp_log_mb(struct mbuf *m, int from)
218 {
219 	struct sctp_cwnd_log sctp_clog;
220 
221 	sctp_clog.x.mb.mp = m;
222 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
223 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
224 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
225 	if (SCTP_BUF_IS_EXTENDED(m)) {
226 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
227 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
228 	} else {
229 		sctp_clog.x.mb.ext = 0;
230 		sctp_clog.x.mb.refcnt = 0;
231 	}
232 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
233 	    SCTP_LOG_EVENT_MBUF,
234 	    from,
235 	    sctp_clog.x.misc.log1,
236 	    sctp_clog.x.misc.log2,
237 	    sctp_clog.x.misc.log3,
238 	    sctp_clog.x.misc.log4);
239 }
240 
241 void
242 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
243 {
244 	struct sctp_cwnd_log sctp_clog;
245 
246 	if (control == NULL) {
247 		SCTP_PRINTF("Gak log of NULL?\n");
248 		return;
249 	}
250 	sctp_clog.x.strlog.stcb = control->stcb;
251 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
252 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
253 	sctp_clog.x.strlog.strm = control->sinfo_stream;
254 	if (poschk != NULL) {
255 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
256 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
257 	} else {
258 		sctp_clog.x.strlog.e_tsn = 0;
259 		sctp_clog.x.strlog.e_sseq = 0;
260 	}
261 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
262 	    SCTP_LOG_EVENT_STRM,
263 	    from,
264 	    sctp_clog.x.misc.log1,
265 	    sctp_clog.x.misc.log2,
266 	    sctp_clog.x.misc.log3,
267 	    sctp_clog.x.misc.log4);
268 }
269 
270 void
271 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
272 {
273 	struct sctp_cwnd_log sctp_clog;
274 
275 	sctp_clog.x.cwnd.net = net;
276 	if (stcb->asoc.send_queue_cnt > 255)
277 		sctp_clog.x.cwnd.cnt_in_send = 255;
278 	else
279 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
280 	if (stcb->asoc.stream_queue_cnt > 255)
281 		sctp_clog.x.cwnd.cnt_in_str = 255;
282 	else
283 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
284 
285 	if (net) {
286 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
287 		sctp_clog.x.cwnd.inflight = net->flight_size;
288 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
289 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
290 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
291 	}
292 	if (SCTP_CWNDLOG_PRESEND == from) {
293 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
294 	}
295 	sctp_clog.x.cwnd.cwnd_augment = augment;
296 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
297 	    SCTP_LOG_EVENT_CWND,
298 	    from,
299 	    sctp_clog.x.misc.log1,
300 	    sctp_clog.x.misc.log2,
301 	    sctp_clog.x.misc.log3,
302 	    sctp_clog.x.misc.log4);
303 }
304 
305 void
306 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
307 {
308 	struct sctp_cwnd_log sctp_clog;
309 
310 	memset(&sctp_clog, 0, sizeof(sctp_clog));
311 	if (inp) {
312 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
313 
314 	} else {
315 		sctp_clog.x.lock.sock = (void *)NULL;
316 	}
317 	sctp_clog.x.lock.inp = (void *)inp;
318 	if (stcb) {
319 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
320 	} else {
321 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
322 	}
323 	if (inp) {
324 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
325 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
326 	} else {
327 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
328 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
329 	}
330 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
331 	if (inp && (inp->sctp_socket)) {
332 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
333 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
334 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
335 	} else {
336 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
337 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
338 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
339 	}
340 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
341 	    SCTP_LOG_LOCK_EVENT,
342 	    from,
343 	    sctp_clog.x.misc.log1,
344 	    sctp_clog.x.misc.log2,
345 	    sctp_clog.x.misc.log3,
346 	    sctp_clog.x.misc.log4);
347 }
348 
349 void
350 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
351 {
352 	struct sctp_cwnd_log sctp_clog;
353 
354 	memset(&sctp_clog, 0, sizeof(sctp_clog));
355 	sctp_clog.x.cwnd.net = net;
356 	sctp_clog.x.cwnd.cwnd_new_value = error;
357 	sctp_clog.x.cwnd.inflight = net->flight_size;
358 	sctp_clog.x.cwnd.cwnd_augment = burst;
359 	if (stcb->asoc.send_queue_cnt > 255)
360 		sctp_clog.x.cwnd.cnt_in_send = 255;
361 	else
362 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
363 	if (stcb->asoc.stream_queue_cnt > 255)
364 		sctp_clog.x.cwnd.cnt_in_str = 255;
365 	else
366 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
367 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
368 	    SCTP_LOG_EVENT_MAXBURST,
369 	    from,
370 	    sctp_clog.x.misc.log1,
371 	    sctp_clog.x.misc.log2,
372 	    sctp_clog.x.misc.log3,
373 	    sctp_clog.x.misc.log4);
374 }
375 
376 void
377 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
378 {
379 	struct sctp_cwnd_log sctp_clog;
380 
381 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
382 	sctp_clog.x.rwnd.send_size = snd_size;
383 	sctp_clog.x.rwnd.overhead = overhead;
384 	sctp_clog.x.rwnd.new_rwnd = 0;
385 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
386 	    SCTP_LOG_EVENT_RWND,
387 	    from,
388 	    sctp_clog.x.misc.log1,
389 	    sctp_clog.x.misc.log2,
390 	    sctp_clog.x.misc.log3,
391 	    sctp_clog.x.misc.log4);
392 }
393 
394 void
395 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
396 {
397 	struct sctp_cwnd_log sctp_clog;
398 
399 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
400 	sctp_clog.x.rwnd.send_size = flight_size;
401 	sctp_clog.x.rwnd.overhead = overhead;
402 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
403 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
404 	    SCTP_LOG_EVENT_RWND,
405 	    from,
406 	    sctp_clog.x.misc.log1,
407 	    sctp_clog.x.misc.log2,
408 	    sctp_clog.x.misc.log3,
409 	    sctp_clog.x.misc.log4);
410 }
411 
412 void
413 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
414 {
415 	struct sctp_cwnd_log sctp_clog;
416 
417 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
418 	sctp_clog.x.mbcnt.size_change = book;
419 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
420 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
421 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
422 	    SCTP_LOG_EVENT_MBCNT,
423 	    from,
424 	    sctp_clog.x.misc.log1,
425 	    sctp_clog.x.misc.log2,
426 	    sctp_clog.x.misc.log3,
427 	    sctp_clog.x.misc.log4);
428 }
429 
/*
 * Log four caller-supplied 32-bit values to KTR as a generic
 * "miscellaneous" SCTP event; 'from' identifies the call site.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
438 
439 void
440 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
441 {
442 	struct sctp_cwnd_log sctp_clog;
443 
444 	sctp_clog.x.wake.stcb = (void *)stcb;
445 	sctp_clog.x.wake.wake_cnt = wake_cnt;
446 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
447 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
448 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
449 
450 	if (stcb->asoc.stream_queue_cnt < 0xff)
451 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
452 	else
453 		sctp_clog.x.wake.stream_qcnt = 0xff;
454 
455 	if (stcb->asoc.chunks_on_out_queue < 0xff)
456 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
457 	else
458 		sctp_clog.x.wake.chunks_on_oque = 0xff;
459 
460 	sctp_clog.x.wake.sctpflags = 0;
461 	/* set in the defered mode stuff */
462 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
463 		sctp_clog.x.wake.sctpflags |= 1;
464 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
465 		sctp_clog.x.wake.sctpflags |= 2;
466 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
467 		sctp_clog.x.wake.sctpflags |= 4;
468 	/* what about the sb */
469 	if (stcb->sctp_socket) {
470 		struct socket *so = stcb->sctp_socket;
471 
472 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
473 	} else {
474 		sctp_clog.x.wake.sbflags = 0xff;
475 	}
476 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
477 	    SCTP_LOG_EVENT_WAKE,
478 	    from,
479 	    sctp_clog.x.misc.log1,
480 	    sctp_clog.x.misc.log2,
481 	    sctp_clog.x.misc.log3,
482 	    sctp_clog.x.misc.log4);
483 }
484 
485 void
486 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
487 {
488 	struct sctp_cwnd_log sctp_clog;
489 
490 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
491 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
492 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
493 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
494 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
495 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
496 	sctp_clog.x.blk.sndlen = sendlen;
497 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
498 	    SCTP_LOG_EVENT_BLOCK,
499 	    from,
500 	    sctp_clog.x.misc.log1,
501 	    sctp_clog.x.misc.log2,
502 	    sctp_clog.x.misc.log3,
503 	    sctp_clog.x.misc.log4);
504 }
505 
/*
 * No-op stub kept for the stat-log socket option; always reports
 * success. Log records are emitted through KTR instead.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
512 
#ifdef SCTP_AUDITING_ENABLED
/* Circular buffer of (event, detail) byte pairs, plus its write index. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;
516 
517 static
518 void
519 sctp_print_audit_report(void)
520 {
521 	int i;
522 	int cnt;
523 
524 	cnt = 0;
525 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
526 		if ((sctp_audit_data[i][0] == 0xe0) &&
527 		    (sctp_audit_data[i][1] == 0x01)) {
528 			cnt = 0;
529 			SCTP_PRINTF("\n");
530 		} else if (sctp_audit_data[i][0] == 0xf0) {
531 			cnt = 0;
532 			SCTP_PRINTF("\n");
533 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
534 		    (sctp_audit_data[i][1] == 0x01)) {
535 			SCTP_PRINTF("\n");
536 			cnt = 0;
537 		}
538 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
539 		    (uint32_t) sctp_audit_data[i][1]);
540 		cnt++;
541 		if ((cnt % 14) == 0)
542 			SCTP_PRINTF("\n");
543 	}
544 	for (i = 0; i < sctp_audit_indx; i++) {
545 		if ((sctp_audit_data[i][0] == 0xe0) &&
546 		    (sctp_audit_data[i][1] == 0x01)) {
547 			cnt = 0;
548 			SCTP_PRINTF("\n");
549 		} else if (sctp_audit_data[i][0] == 0xf0) {
550 			cnt = 0;
551 			SCTP_PRINTF("\n");
552 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
553 		    (sctp_audit_data[i][1] == 0x01)) {
554 			SCTP_PRINTF("\n");
555 			cnt = 0;
556 		}
557 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
558 		    (uint32_t) sctp_audit_data[i][1]);
559 		cnt++;
560 		if ((cnt % 14) == 0)
561 			SCTP_PRINTF("\n");
562 	}
563 	SCTP_PRINTF("\n");
564 }
565 
/*
 * Consistency audit of an association's retransmission and flight-size
 * accounting. Each step appends an (event, detail) pair to the circular
 * sctp_audit_data[] buffer: 0xAA marks entry (with the caller id in
 * 'from'), 0xAF marks a detected problem. When any mismatch is found,
 * the counters are corrected in place and the full audit report is
 * printed. The 'net' parameter is currently unused in this body.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Record entry into the auditor and who called us. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: no endpoint to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: no association to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: snapshot the current retransmit count (low byte only). */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount retransmissions and in-flight bytes from the sent queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit count mismatch; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		/* 0xA2: record the corrected retransmit count. */
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total_flight disagrees with the sent queue; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: in-flight chunk count disagrees; correct it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: the per-net flight sizes must sum to total_flight. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight sizes are inconsistent. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			/* Recompute this net's flight from the sent queue. */
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
695 
696 void
697 sctp_audit_log(uint8_t ev, uint8_t fd)
698 {
699 
700 	sctp_audit_data[sctp_audit_indx][0] = ev;
701 	sctp_audit_data[sctp_audit_indx][1] = fd;
702 	sctp_audit_indx++;
703 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
704 		sctp_audit_indx = 0;
705 	}
706 }
707 
708 #endif
709 
710 /*
711  * sctp_stop_timers_for_shutdown() should be called
712  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
713  * state to make sure that all timers are stopped.
714  */
715 void
716 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
717 {
718 	struct sctp_association *asoc;
719 	struct sctp_nets *net;
720 
721 	asoc = &stcb->asoc;
722 
723 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
724 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
725 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
726 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
727 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
728 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
729 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
730 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
731 	}
732 }
733 
734 /*
735  * a list of sizes based on typical mtu's, used only if next hop size not
736  * returned.
737  */
738 static uint32_t sctp_mtu_sizes[] = {
739 	68,
740 	296,
741 	508,
742 	512,
743 	544,
744 	576,
745 	1006,
746 	1492,
747 	1500,
748 	1536,
749 	2002,
750 	2048,
751 	4352,
752 	4464,
753 	8166,
754 	17914,
755 	32000,
756 	65535
757 };
758 
759 /*
760  * Return the largest MTU smaller than val. If there is no
761  * entry, just return val.
762  */
763 uint32_t
764 sctp_get_prev_mtu(uint32_t val)
765 {
766 	uint32_t i;
767 
768 	if (val <= sctp_mtu_sizes[0]) {
769 		return (val);
770 	}
771 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
772 		if (val <= sctp_mtu_sizes[i]) {
773 			break;
774 		}
775 	}
776 	return (sctp_mtu_sizes[i - 1]);
777 }
778 
779 /*
780  * Return the smallest MTU larger than val. If there is no
781  * entry, just return val.
782  */
783 uint32_t
784 sctp_get_next_mtu(uint32_t val)
785 {
786 	/* select another MTU that is just bigger than this one */
787 	uint32_t i;
788 
789 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
790 		if (val < sctp_mtu_sizes[i]) {
791 			return (sctp_mtu_sizes[i]);
792 		}
793 	}
794 	return (val);
795 }
796 
/*
 * Refill the endpoint's random_store from an HMAC over its random
 * numbers and a monotonically increasing counter, and reset the
 * read position (store_at) to the beginning.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* Bump the counter so the next refill yields different output. */
	m->random_counter++;
}
815 
816 uint32_t
817 sctp_select_initial_TSN(struct sctp_pcb *inp)
818 {
819 	/*
820 	 * A true implementation should use random selection process to get
821 	 * the initial stream sequence number, using RFC1750 as a good
822 	 * guideline
823 	 */
824 	uint32_t x, *xp;
825 	uint8_t *p;
826 	int store_at, new_store;
827 
828 	if (inp->initial_sequence_debug != 0) {
829 		uint32_t ret;
830 
831 		ret = inp->initial_sequence_debug;
832 		inp->initial_sequence_debug++;
833 		return (ret);
834 	}
835 retry:
836 	store_at = inp->store_at;
837 	new_store = store_at + sizeof(uint32_t);
838 	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
839 		new_store = 0;
840 	}
841 	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
842 		goto retry;
843 	}
844 	if (new_store == 0) {
845 		/* Refill the random store */
846 		sctp_fill_random_store(inp);
847 	}
848 	p = &inp->random_store[store_at];
849 	xp = (uint32_t *) p;
850 	x = *xp;
851 	return (x);
852 }
853 
854 uint32_t
855 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
856 {
857 	uint32_t x;
858 	struct timeval now;
859 
860 	if (check) {
861 		(void)SCTP_GETTIME_TIMEVAL(&now);
862 	}
863 	for (;;) {
864 		x = sctp_select_initial_TSN(&inp->sctp_ep);
865 		if (x == 0) {
866 			/* we never use 0 */
867 			continue;
868 		}
869 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
870 			break;
871 		}
872 	}
873 	return (x);
874 }
875 
876 int
877 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
878     uint32_t override_tag, uint32_t vrf_id)
879 {
880 	struct sctp_association *asoc;
881 
882 	/*
883 	 * Anything set to zero is taken care of by the allocation routine's
884 	 * bzero
885 	 */
886 
887 	/*
888 	 * Up front select what scoping to apply on addresses I tell my peer
889 	 * Not sure what to do with these right now, we will need to come up
890 	 * with a way to set them. We may need to pass them through from the
891 	 * caller in the sctp_aloc_assoc() function.
892 	 */
893 	int i;
894 
895 	asoc = &stcb->asoc;
896 	/* init all variables to a known value. */
897 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
898 	asoc->max_burst = m->sctp_ep.max_burst;
899 	asoc->fr_max_burst = m->sctp_ep.fr_max_burst;
900 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
901 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
902 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
903 	asoc->ecn_allowed = m->sctp_ecn_enable;
904 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
905 	asoc->sctp_cmt_pf = (uint8_t) 0;
906 	asoc->sctp_frag_point = m->sctp_frag_point;
907 	asoc->sctp_features = m->sctp_features;
908 	asoc->default_dscp = m->sctp_ep.default_dscp;
909 #ifdef INET6
910 	if (m->sctp_ep.default_flowlabel) {
911 		asoc->default_flowlabel = m->sctp_ep.default_flowlabel;
912 	} else {
913 		if (m->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
914 			asoc->default_flowlabel = sctp_select_initial_TSN(&m->sctp_ep);
915 			asoc->default_flowlabel &= 0x000fffff;
916 			asoc->default_flowlabel |= 0x80000000;
917 		} else {
918 			asoc->default_flowlabel = 0;
919 		}
920 	}
921 #endif
922 	asoc->sb_send_resv = 0;
923 	if (override_tag) {
924 		asoc->my_vtag = override_tag;
925 	} else {
926 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
927 	}
928 	/* Get the nonce tags */
929 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
930 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
931 	asoc->vrf_id = vrf_id;
932 
933 #ifdef SCTP_ASOCLOG_OF_TSNS
934 	asoc->tsn_in_at = 0;
935 	asoc->tsn_out_at = 0;
936 	asoc->tsn_in_wrapped = 0;
937 	asoc->tsn_out_wrapped = 0;
938 	asoc->cumack_log_at = 0;
939 	asoc->cumack_log_atsnt = 0;
940 #endif
941 #ifdef SCTP_FS_SPEC_LOG
942 	asoc->fs_index = 0;
943 #endif
944 	asoc->refcnt = 0;
945 	asoc->assoc_up_sent = 0;
946 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
947 	    sctp_select_initial_TSN(&m->sctp_ep);
948 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
949 	/* we are optimisitic here */
950 	asoc->peer_supports_pktdrop = 1;
951 	asoc->peer_supports_nat = 0;
952 	asoc->sent_queue_retran_cnt = 0;
953 
954 	/* for CMT */
955 	asoc->last_net_cmt_send_started = NULL;
956 
957 	/* This will need to be adjusted */
958 	asoc->last_acked_seq = asoc->init_seq_number - 1;
959 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
960 	asoc->asconf_seq_in = asoc->last_acked_seq;
961 
962 	/* here we are different, we hold the next one we expect */
963 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
964 
965 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
966 	asoc->initial_rto = m->sctp_ep.initial_rto;
967 
968 	asoc->max_init_times = m->sctp_ep.max_init_times;
969 	asoc->max_send_times = m->sctp_ep.max_send_times;
970 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
971 	asoc->def_net_pf_threshold = m->sctp_ep.def_net_pf_threshold;
972 	asoc->free_chunk_cnt = 0;
973 
974 	asoc->iam_blocking = 0;
975 	asoc->context = m->sctp_context;
976 	asoc->local_strreset_support = m->local_strreset_support;
977 	asoc->def_send = m->def_send;
978 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
979 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
980 	asoc->pr_sctp_cnt = 0;
981 	asoc->total_output_queue_size = 0;
982 
983 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
984 		struct in6pcb *inp6;
985 
986 		/* Its a V6 socket */
987 		inp6 = (struct in6pcb *)m;
988 		asoc->ipv6_addr_legal = 1;
989 		/* Now look at the binding flag to see if V4 will be legal */
990 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
991 			asoc->ipv4_addr_legal = 1;
992 		} else {
993 			/* V4 addresses are NOT legal on the association */
994 			asoc->ipv4_addr_legal = 0;
995 		}
996 	} else {
997 		/* Its a V4 socket, no - V6 */
998 		asoc->ipv4_addr_legal = 1;
999 		asoc->ipv6_addr_legal = 0;
1000 	}
1001 
1002 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1003 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1004 
1005 	asoc->smallest_mtu = m->sctp_frag_point;
1006 	asoc->minrto = m->sctp_ep.sctp_minrto;
1007 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1008 
1009 	asoc->locked_on_sending = NULL;
1010 	asoc->stream_locked_on = 0;
1011 	asoc->ecn_echo_cnt_onq = 0;
1012 	asoc->stream_locked = 0;
1013 
1014 	asoc->send_sack = 1;
1015 
1016 	LIST_INIT(&asoc->sctp_restricted_addrs);
1017 
1018 	TAILQ_INIT(&asoc->nets);
1019 	TAILQ_INIT(&asoc->pending_reply_queue);
1020 	TAILQ_INIT(&asoc->asconf_ack_sent);
1021 	/* Setup to fill the hb random cache at first HB */
1022 	asoc->hb_random_idx = 4;
1023 
1024 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1025 
1026 	stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
1027 	stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
1028 
1029 	stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
1030 	stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
1031 
1032 	/*
1033 	 * Now the stream parameters, here we allocate space for all streams
1034 	 * that we request by default.
1035 	 */
1036 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1037 	    m->sctp_ep.pre_open_stream_count;
1038 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1039 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1040 	    SCTP_M_STRMO);
1041 	if (asoc->strmout == NULL) {
1042 		/* big trouble no memory */
1043 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1044 		return (ENOMEM);
1045 	}
1046 	for (i = 0; i < asoc->streamoutcnt; i++) {
1047 		/*
1048 		 * inbound side must be set to 0xffff, also NOTE when we get
1049 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1050 		 * count (streamoutcnt) but first check if we sent to any of
1051 		 * the upper streams that were dropped (if some were). Those
1052 		 * that were dropped must be notified to the upper layer as
1053 		 * failed to send.
1054 		 */
1055 		asoc->strmout[i].next_sequence_sent = 0x0;
1056 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1057 		asoc->strmout[i].stream_no = i;
1058 		asoc->strmout[i].last_msg_incomplete = 0;
1059 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1060 	}
1061 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1062 
1063 	/* Now the mapping array */
1064 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1065 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1066 	    SCTP_M_MAP);
1067 	if (asoc->mapping_array == NULL) {
1068 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1069 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1070 		return (ENOMEM);
1071 	}
1072 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1073 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1074 	    SCTP_M_MAP);
1075 	if (asoc->nr_mapping_array == NULL) {
1076 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1077 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1078 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1079 		return (ENOMEM);
1080 	}
1081 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1082 
1083 	/* Now the init of the other outqueues */
1084 	TAILQ_INIT(&asoc->free_chunks);
1085 	TAILQ_INIT(&asoc->control_send_queue);
1086 	TAILQ_INIT(&asoc->asconf_send_queue);
1087 	TAILQ_INIT(&asoc->send_queue);
1088 	TAILQ_INIT(&asoc->sent_queue);
1089 	TAILQ_INIT(&asoc->reasmqueue);
1090 	TAILQ_INIT(&asoc->resetHead);
1091 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1092 	TAILQ_INIT(&asoc->asconf_queue);
1093 	/* authentication fields */
1094 	asoc->authinfo.random = NULL;
1095 	asoc->authinfo.active_keyid = 0;
1096 	asoc->authinfo.assoc_key = NULL;
1097 	asoc->authinfo.assoc_keyid = 0;
1098 	asoc->authinfo.recv_key = NULL;
1099 	asoc->authinfo.recv_keyid = 0;
1100 	LIST_INIT(&asoc->shared_keys);
1101 	asoc->marked_retrans = 0;
1102 	asoc->port = m->sctp_ep.port;
1103 	asoc->timoinit = 0;
1104 	asoc->timodata = 0;
1105 	asoc->timosack = 0;
1106 	asoc->timoshutdown = 0;
1107 	asoc->timoheartbeat = 0;
1108 	asoc->timocookie = 0;
1109 	asoc->timoshutdownack = 0;
1110 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1111 	asoc->discontinuity_time = asoc->start_time;
1112 	/*
1113 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1114 	 * freed later when the association is freed.
1115 	 */
1116 	return (0);
1117 }
1118 
1119 void
1120 sctp_print_mapping_array(struct sctp_association *asoc)
1121 {
1122 	unsigned int i, limit;
1123 
1124 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1125 	    asoc->mapping_array_size,
1126 	    asoc->mapping_array_base_tsn,
1127 	    asoc->cumulative_tsn,
1128 	    asoc->highest_tsn_inside_map,
1129 	    asoc->highest_tsn_inside_nr_map);
1130 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1131 		if (asoc->mapping_array[limit - 1] != 0) {
1132 			break;
1133 		}
1134 	}
1135 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1136 	for (i = 0; i < limit; i++) {
1137 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1138 	}
1139 	if (limit % 16)
1140 		SCTP_PRINTF("\n");
1141 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1142 		if (asoc->nr_mapping_array[limit - 1]) {
1143 			break;
1144 		}
1145 	}
1146 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1147 	for (i = 0; i < limit; i++) {
1148 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1149 	}
1150 	if (limit % 16)
1151 		SCTP_PRINTF("\n");
1152 }
1153 
1154 int
1155 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1156 {
1157 	/* mapping array needs to grow */
1158 	uint8_t *new_array1, *new_array2;
1159 	uint32_t new_size;
1160 
1161 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1162 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1163 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1164 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1165 		/* can't get more, forget it */
1166 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1167 		if (new_array1) {
1168 			SCTP_FREE(new_array1, SCTP_M_MAP);
1169 		}
1170 		if (new_array2) {
1171 			SCTP_FREE(new_array2, SCTP_M_MAP);
1172 		}
1173 		return (-1);
1174 	}
1175 	memset(new_array1, 0, new_size);
1176 	memset(new_array2, 0, new_size);
1177 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1178 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1179 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1180 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1181 	asoc->mapping_array = new_array1;
1182 	asoc->nr_mapping_array = new_array2;
1183 	asoc->mapping_array_size = new_size;
1184 	return (0);
1185 }
1186 
1187 
/*
 * Run one queued iterator to completion: walk the endpoint list (or a
 * single endpoint when SCTP_ITERATOR_DO_SINGLE_INP is set), and for each
 * endpoint matching it->pcb_flags / it->pcb_features invoke:
 *   function_inp     - once per endpoint (non-zero return skips its assocs),
 *   function_assoc   - once per association matching it->asoc_state,
 *   function_inp_end - after the last association of an endpoint,
 *   function_atend   - once when the whole iteration is done.
 * Acquires the global INP-INFO read lock and the iterator lock for the
 * duration (periodically dropped to let others in) and frees "it" before
 * returning.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/*
		 * NOTE(review): this drops a reference presumably taken
		 * when the iterator was scheduled, so the inp could not
		 * vanish while the request sat on the work queue --
		 * confirm against sctp_initiate_iterator.
		 */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* The iterator owns itself; free it on completion. */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* First endpoint is already read-locked above. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/* Skip endpoints whose flags/features don't match the request. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Hold refs on both the stcb (refcnt) and the inp
			 * so neither can be freed while every lock is
			 * dropped and reacquired in order below.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/*
			 * While unlocked, someone may have asked us to
			 * stop this iterator or this endpoint.
			 */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1335 
1336 void
1337 sctp_iterator_worker(void)
1338 {
1339 	struct sctp_iterator *it, *nit;
1340 
1341 	/* This function is called with the WQ lock in place */
1342 
1343 	sctp_it_ctl.iterator_running = 1;
1344 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1345 		sctp_it_ctl.cur_it = it;
1346 		/* now lets work on this one */
1347 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1348 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1349 		CURVNET_SET(it->vn);
1350 		sctp_iterator_work(it);
1351 		sctp_it_ctl.cur_it = NULL;
1352 		CURVNET_RESTORE();
1353 		SCTP_IPI_ITERATOR_WQ_LOCK();
1354 		/* sa_ignore FREED_MEMORY */
1355 	}
1356 	sctp_it_ctl.iterator_running = 0;
1357 	return;
1358 }
1359 
1360 
1361 static void
1362 sctp_handle_addr_wq(void)
1363 {
1364 	/* deal with the ADDR wq from the rtsock calls */
1365 	struct sctp_laddr *wi, *nwi;
1366 	struct sctp_asconf_iterator *asc;
1367 
1368 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1369 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1370 	if (asc == NULL) {
1371 		/* Try later, no memory */
1372 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1373 		    (struct sctp_inpcb *)NULL,
1374 		    (struct sctp_tcb *)NULL,
1375 		    (struct sctp_nets *)NULL);
1376 		return;
1377 	}
1378 	LIST_INIT(&asc->list_of_work);
1379 	asc->cnt = 0;
1380 
1381 	SCTP_WQ_ADDR_LOCK();
1382 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1383 		LIST_REMOVE(wi, sctp_nxt_addr);
1384 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1385 		asc->cnt++;
1386 	}
1387 	SCTP_WQ_ADDR_UNLOCK();
1388 
1389 	if (asc->cnt == 0) {
1390 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1391 	} else {
1392 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1393 		    sctp_asconf_iterator_stcb,
1394 		    NULL,	/* No ep end for boundall */
1395 		    SCTP_PCB_FLAGS_BOUNDALL,
1396 		    SCTP_PCB_ANY_FEATURES,
1397 		    SCTP_ASOC_ANY_STATE,
1398 		    (void *)asc, 0,
1399 		    sctp_asconf_iterator_end, NULL, 0);
1400 	}
1401 }
1402 
/*
 * Common callout handler for every SCTP timer type.  "t" points at the
 * struct sctp_timer embedded in the endpoint, association, or net it
 * belongs to.  The handler validates the timer (stale pointer, bad type,
 * no longer active/pending), takes references and locks on the inp/stcb
 * as needed, dispatches to the per-type timer routine, and in most cases
 * pushes pending chunks out afterwards.  tmr->stopped_from is a debugging
 * breadcrumb recording how far the handler got before bailing out.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	/* Default: most cases produce output, so ECN cleanup runs below. */
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Only the ADDR_WQ timer may legitimately have no endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	/*
	 * Save the type now: tmr lives inside the inp/stcb and may be gone
	 * before the final debug print (ASOCKILL/INPKILL paths free them).
	 */
	type = tmr->type;
	if (inp) {
		/* Pin the inp for the duration of the handler. */
		SCTP_INP_INCR_REF(inp);
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			/* Socket already gone and this type needs one. */
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the stcb before checking its state. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while this callout was in flight. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			/* Assoc is being torn down; only ASOCKILL proceeds. */
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* Re-arm only if HB is still enabled on this destination. */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's cookie secret keys. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Shutdown took too long: abort the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* Final teardown of the association. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	/* Use the saved "type": tmr may no longer be valid here. */
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1842 
1843 void
1844 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1845     struct sctp_nets *net)
1846 {
1847 	uint32_t to_ticks;
1848 	struct sctp_timer *tmr;
1849 
1850 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1851 		return;
1852 
1853 	tmr = NULL;
1854 	if (stcb) {
1855 		SCTP_TCB_LOCK_ASSERT(stcb);
1856 	}
1857 	switch (t_type) {
1858 	case SCTP_TIMER_TYPE_ZERO_COPY:
1859 		tmr = &inp->sctp_ep.zero_copy_timer;
1860 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1861 		break;
1862 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1863 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1864 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1865 		break;
1866 	case SCTP_TIMER_TYPE_ADDR_WQ:
1867 		/* Only 1 tick away :-) */
1868 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1869 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1870 		break;
1871 	case SCTP_TIMER_TYPE_SEND:
1872 		/* Here we use the RTO timer */
1873 		{
1874 			int rto_val;
1875 
1876 			if ((stcb == NULL) || (net == NULL)) {
1877 				return;
1878 			}
1879 			tmr = &net->rxt_timer;
1880 			if (net->RTO == 0) {
1881 				rto_val = stcb->asoc.initial_rto;
1882 			} else {
1883 				rto_val = net->RTO;
1884 			}
1885 			to_ticks = MSEC_TO_TICKS(rto_val);
1886 		}
1887 		break;
1888 	case SCTP_TIMER_TYPE_INIT:
1889 		/*
1890 		 * Here we use the INIT timer default usually about 1
1891 		 * minute.
1892 		 */
1893 		if ((stcb == NULL) || (net == NULL)) {
1894 			return;
1895 		}
1896 		tmr = &net->rxt_timer;
1897 		if (net->RTO == 0) {
1898 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1899 		} else {
1900 			to_ticks = MSEC_TO_TICKS(net->RTO);
1901 		}
1902 		break;
1903 	case SCTP_TIMER_TYPE_RECV:
1904 		/*
1905 		 * Here we use the Delayed-Ack timer value from the inp
1906 		 * ususually about 200ms.
1907 		 */
1908 		if (stcb == NULL) {
1909 			return;
1910 		}
1911 		tmr = &stcb->asoc.dack_timer;
1912 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1913 		break;
1914 	case SCTP_TIMER_TYPE_SHUTDOWN:
1915 		/* Here we use the RTO of the destination. */
1916 		if ((stcb == NULL) || (net == NULL)) {
1917 			return;
1918 		}
1919 		if (net->RTO == 0) {
1920 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1921 		} else {
1922 			to_ticks = MSEC_TO_TICKS(net->RTO);
1923 		}
1924 		tmr = &net->rxt_timer;
1925 		break;
1926 	case SCTP_TIMER_TYPE_HEARTBEAT:
1927 		/*
1928 		 * the net is used here so that we can add in the RTO. Even
1929 		 * though we use a different timer. We also add the HB timer
1930 		 * PLUS a random jitter.
1931 		 */
1932 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1933 			return;
1934 		} else {
1935 			uint32_t rndval;
1936 			uint32_t jitter;
1937 
1938 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1939 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1940 				return;
1941 			}
1942 			if (net->RTO == 0) {
1943 				to_ticks = stcb->asoc.initial_rto;
1944 			} else {
1945 				to_ticks = net->RTO;
1946 			}
1947 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1948 			jitter = rndval % to_ticks;
1949 			if (jitter >= (to_ticks >> 1)) {
1950 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1951 			} else {
1952 				to_ticks = to_ticks - jitter;
1953 			}
1954 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1955 			    !(net->dest_state & SCTP_ADDR_PF)) {
1956 				to_ticks += net->heart_beat_delay;
1957 			}
1958 			/*
1959 			 * Now we must convert the to_ticks that are now in
1960 			 * ms to ticks.
1961 			 */
1962 			to_ticks = MSEC_TO_TICKS(to_ticks);
1963 			tmr = &net->hb_timer;
1964 		}
1965 		break;
1966 	case SCTP_TIMER_TYPE_COOKIE:
1967 		/*
1968 		 * Here we can use the RTO timer from the network since one
1969 		 * RTT was compelete. If a retran happened then we will be
1970 		 * using the RTO initial value.
1971 		 */
1972 		if ((stcb == NULL) || (net == NULL)) {
1973 			return;
1974 		}
1975 		if (net->RTO == 0) {
1976 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1977 		} else {
1978 			to_ticks = MSEC_TO_TICKS(net->RTO);
1979 		}
1980 		tmr = &net->rxt_timer;
1981 		break;
1982 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1983 		/*
1984 		 * nothing needed but the endpoint here ususually about 60
1985 		 * minutes.
1986 		 */
1987 		if (inp == NULL) {
1988 			return;
1989 		}
1990 		tmr = &inp->sctp_ep.signature_change;
1991 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1992 		break;
1993 	case SCTP_TIMER_TYPE_ASOCKILL:
1994 		if (stcb == NULL) {
1995 			return;
1996 		}
1997 		tmr = &stcb->asoc.strreset_timer;
1998 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
1999 		break;
2000 	case SCTP_TIMER_TYPE_INPKILL:
2001 		/*
2002 		 * The inp is setup to die. We re-use the signature_chage
2003 		 * timer since that has stopped and we are in the GONE
2004 		 * state.
2005 		 */
2006 		if (inp == NULL) {
2007 			return;
2008 		}
2009 		tmr = &inp->sctp_ep.signature_change;
2010 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2011 		break;
2012 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2013 		/*
2014 		 * Here we use the value found in the EP for PMTU ususually
2015 		 * about 10 minutes.
2016 		 */
2017 		if ((stcb == NULL) || (inp == NULL)) {
2018 			return;
2019 		}
2020 		if (net == NULL) {
2021 			return;
2022 		}
2023 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2024 			return;
2025 		}
2026 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2027 		tmr = &net->pmtu_timer;
2028 		break;
2029 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2030 		/* Here we use the RTO of the destination */
2031 		if ((stcb == NULL) || (net == NULL)) {
2032 			return;
2033 		}
2034 		if (net->RTO == 0) {
2035 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2036 		} else {
2037 			to_ticks = MSEC_TO_TICKS(net->RTO);
2038 		}
2039 		tmr = &net->rxt_timer;
2040 		break;
2041 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2042 		/*
2043 		 * Here we use the endpoints shutdown guard timer usually
2044 		 * about 3 minutes.
2045 		 */
2046 		if ((inp == NULL) || (stcb == NULL)) {
2047 			return;
2048 		}
2049 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2050 		tmr = &stcb->asoc.shut_guard_timer;
2051 		break;
2052 	case SCTP_TIMER_TYPE_STRRESET:
2053 		/*
2054 		 * Here the timer comes from the stcb but its value is from
2055 		 * the net's RTO.
2056 		 */
2057 		if ((stcb == NULL) || (net == NULL)) {
2058 			return;
2059 		}
2060 		if (net->RTO == 0) {
2061 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2062 		} else {
2063 			to_ticks = MSEC_TO_TICKS(net->RTO);
2064 		}
2065 		tmr = &stcb->asoc.strreset_timer;
2066 		break;
2067 	case SCTP_TIMER_TYPE_ASCONF:
2068 		/*
2069 		 * Here the timer comes from the stcb but its value is from
2070 		 * the net's RTO.
2071 		 */
2072 		if ((stcb == NULL) || (net == NULL)) {
2073 			return;
2074 		}
2075 		if (net->RTO == 0) {
2076 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2077 		} else {
2078 			to_ticks = MSEC_TO_TICKS(net->RTO);
2079 		}
2080 		tmr = &stcb->asoc.asconf_timer;
2081 		break;
2082 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2083 		if ((stcb == NULL) || (net != NULL)) {
2084 			return;
2085 		}
2086 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2087 		tmr = &stcb->asoc.delete_prim_timer;
2088 		break;
2089 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2090 		if (stcb == NULL) {
2091 			return;
2092 		}
2093 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2094 			/*
2095 			 * Really an error since stcb is NOT set to
2096 			 * autoclose
2097 			 */
2098 			return;
2099 		}
2100 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2101 		tmr = &stcb->asoc.autoclose_timer;
2102 		break;
2103 	default:
2104 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2105 		    __FUNCTION__, t_type);
2106 		return;
2107 		break;
2108 	}
2109 	if ((to_ticks <= 0) || (tmr == NULL)) {
2110 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2111 		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
2112 		return;
2113 	}
2114 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2115 		/*
2116 		 * we do NOT allow you to have it already running. if it is
2117 		 * we leave the current one up unchanged
2118 		 */
2119 		return;
2120 	}
2121 	/* At this point we can proceed */
2122 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2123 		stcb->asoc.num_send_timers_up++;
2124 	}
2125 	tmr->stopped_from = 0;
2126 	tmr->type = t_type;
2127 	tmr->ep = (void *)inp;
2128 	tmr->tcb = (void *)stcb;
2129 	tmr->net = (void *)net;
2130 	tmr->self = (void *)tmr;
2131 	tmr->vnet = (void *)curvnet;
2132 	tmr->ticks = sctp_get_tick_count();
2133 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2134 	return;
2135 }
2136 
/*
 * Stop (cancel) the timer of the given type.  't_type' selects which
 * sctp_timer structure inside the endpoint (inp), association (stcb),
 * base info, or destination (net) backs the timer; 'from' records the
 * caller's location code in the timer for post-mortem debugging.
 * Missing required arguments for a given type cause a silent return.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Every type except ADDR_WQ needs an endpoint to locate its timer. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the structure that holds it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Global (per-VNET) address work-queue timer. */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		/* INIT retransmissions share the per-destination rxt timer. */
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		/* Delayed-SACK timer lives on the association. */
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		/* COOKIE-ECHO retransmissions also use the rxt timer. */
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer (shares the strreset timer slot).
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	}
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the association's count of armed SEND timers consistent. */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2293 
2294 uint32_t
2295 sctp_calculate_len(struct mbuf *m)
2296 {
2297 	uint32_t tlen = 0;
2298 	struct mbuf *at;
2299 
2300 	at = m;
2301 	while (at) {
2302 		tlen += SCTP_BUF_LEN(at);
2303 		at = SCTP_BUF_NEXT(at);
2304 	}
2305 	return (tlen);
2306 }
2307 
2308 void
2309 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2310     struct sctp_association *asoc, uint32_t mtu)
2311 {
2312 	/*
2313 	 * Reset the P-MTU size on this association, this involves changing
2314 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2315 	 * allow the DF flag to be cleared.
2316 	 */
2317 	struct sctp_tmit_chunk *chk;
2318 	unsigned int eff_mtu, ovh;
2319 
2320 	asoc->smallest_mtu = mtu;
2321 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2322 		ovh = SCTP_MIN_OVERHEAD;
2323 	} else {
2324 		ovh = SCTP_MIN_V4_OVERHEAD;
2325 	}
2326 	eff_mtu = mtu - ovh;
2327 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2328 		if (chk->send_size > eff_mtu) {
2329 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2330 		}
2331 	}
2332 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2333 		if (chk->send_size > eff_mtu) {
2334 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2335 		}
2336 	}
2337 }
2338 
2339 
/*
 * Given an association and the starting time of the current RTT period,
 * return the new RTO in milliseconds.  'net' must point to the current
 * network (the destination against which the RTT sample was measured).
 */
2344 
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 *
	 * Side effects: updates net->rtt (us), net->lastsa/net->lastsv
	 * (scaled SRTT/RTTVAR), net->lan_type, and the association's
	 * satellite-network flags.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 (avoids misaligned timeval access) */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = elapsed time since the RTT period started */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	        (uint64_t) now.tv_usec;

	/* compute RTT in ms */
	rtt = net->rtt / 1000;
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* rtt becomes the error term (sample - srtt) below */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		/* fold |error| into the scaled variance */
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement: seed srtt/rttvar from the sample */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	/* never let the variance collapse to zero */
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		/* once we leave satellite mode, lock out re-entry */
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2460 
/*
 * Return a pointer to a contiguous piece of data from the given mbuf chain
 * starting at 'off' for 'len' bytes.  If the desired piece spans more than
 * one mbuf, a copy is made into 'in_ptr'; the caller must ensure that the
 * buffer is >= 'len' bytes.  Returns NULL if there aren't 'len' bytes in
 * the chain.
 */
2467 caddr_t
2468 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2469 {
2470 	uint32_t count;
2471 	uint8_t *ptr;
2472 
2473 	ptr = in_ptr;
2474 	if ((off < 0) || (len <= 0))
2475 		return (NULL);
2476 
2477 	/* find the desired start location */
2478 	while ((m != NULL) && (off > 0)) {
2479 		if (off < SCTP_BUF_LEN(m))
2480 			break;
2481 		off -= SCTP_BUF_LEN(m);
2482 		m = SCTP_BUF_NEXT(m);
2483 	}
2484 	if (m == NULL)
2485 		return (NULL);
2486 
2487 	/* is the current mbuf large enough (eg. contiguous)? */
2488 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2489 		return (mtod(m, caddr_t)+off);
2490 	} else {
2491 		/* else, it spans more than one mbuf, so save a temp copy... */
2492 		while ((m != NULL) && (len > 0)) {
2493 			count = min(SCTP_BUF_LEN(m) - off, len);
2494 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2495 			len -= count;
2496 			ptr += count;
2497 			off = 0;
2498 			m = SCTP_BUF_NEXT(m);
2499 		}
2500 		if ((m == NULL) && (len > 0))
2501 			return (NULL);
2502 		else
2503 			return ((caddr_t)in_ptr);
2504 	}
2505 }
2506 
2507 
2508 
2509 struct sctp_paramhdr *
2510 sctp_get_next_param(struct mbuf *m,
2511     int offset,
2512     struct sctp_paramhdr *pull,
2513     int pull_limit)
2514 {
2515 	/* This just provides a typed signature to Peter's Pull routine */
2516 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2517 	    (uint8_t *) pull));
2518 }
2519 
2520 
2521 int
2522 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2523 {
2524 	/*
2525 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2526 	 * padlen is > 3 this routine will fail.
2527 	 */
2528 	uint8_t *dp;
2529 	int i;
2530 
2531 	if (padlen > 3) {
2532 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2533 		return (ENOBUFS);
2534 	}
2535 	if (padlen <= M_TRAILINGSPACE(m)) {
2536 		/*
2537 		 * The easy way. We hope the majority of the time we hit
2538 		 * here :)
2539 		 */
2540 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2541 		SCTP_BUF_LEN(m) += padlen;
2542 	} else {
2543 		/* Hard way we must grow the mbuf */
2544 		struct mbuf *tmp;
2545 
2546 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2547 		if (tmp == NULL) {
2548 			/* Out of space GAK! we are in big trouble. */
2549 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2550 			return (ENOBUFS);
2551 		}
2552 		/* setup and insert in middle */
2553 		SCTP_BUF_LEN(tmp) = padlen;
2554 		SCTP_BUF_NEXT(tmp) = NULL;
2555 		SCTP_BUF_NEXT(m) = tmp;
2556 		dp = mtod(tmp, uint8_t *);
2557 	}
2558 	/* zero out the pad */
2559 	for (i = 0; i < padlen; i++) {
2560 		*dp = 0;
2561 		dp++;
2562 	}
2563 	return (0);
2564 }
2565 
2566 int
2567 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2568 {
2569 	/* find the last mbuf in chain and pad it */
2570 	struct mbuf *m_at;
2571 
2572 	if (last_mbuf) {
2573 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2574 	} else {
2575 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2576 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2577 				return (sctp_add_pad_tombuf(m_at, padval));
2578 			}
2579 		}
2580 	}
2581 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2582 	return (EFAULT);
2583 }
2584 
2585 static void
2586 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2587     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2588 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2589     SCTP_UNUSED
2590 #endif
2591 )
2592 {
2593 	struct mbuf *m_notify;
2594 	struct sctp_assoc_change *sac;
2595 	struct sctp_queued_to_read *control;
2596 	size_t notif_len, abort_len;
2597 	unsigned int i;
2598 
2599 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2600 	struct socket *so;
2601 
2602 #endif
2603 
2604 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2605 		notif_len = sizeof(struct sctp_assoc_change);
2606 		if (abort != NULL) {
2607 			abort_len = htons(abort->ch.chunk_length);
2608 		} else {
2609 			abort_len = 0;
2610 		}
2611 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2612 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2613 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2614 			notif_len += abort_len;
2615 		}
2616 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
2617 		if (m_notify == NULL) {
2618 			/* Retry with smaller value. */
2619 			notif_len = sizeof(struct sctp_assoc_change);
2620 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
2621 			if (m_notify == NULL) {
2622 				goto set_error;
2623 			}
2624 		}
2625 		SCTP_BUF_NEXT(m_notify) = NULL;
2626 		sac = mtod(m_notify, struct sctp_assoc_change *);
2627 		sac->sac_type = SCTP_ASSOC_CHANGE;
2628 		sac->sac_flags = 0;
2629 		sac->sac_length = sizeof(struct sctp_assoc_change);
2630 		sac->sac_state = state;
2631 		sac->sac_error = error;
2632 		/* XXX verify these stream counts */
2633 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2634 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2635 		sac->sac_assoc_id = sctp_get_associd(stcb);
2636 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2637 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2638 				i = 0;
2639 				if (stcb->asoc.peer_supports_prsctp) {
2640 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2641 				}
2642 				if (stcb->asoc.peer_supports_auth) {
2643 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2644 				}
2645 				if (stcb->asoc.peer_supports_asconf) {
2646 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2647 				}
2648 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2649 				if (stcb->asoc.peer_supports_strreset) {
2650 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2651 				}
2652 				sac->sac_length += i;
2653 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2654 				memcpy(sac->sac_info, abort, abort_len);
2655 				sac->sac_length += abort_len;
2656 			}
2657 		}
2658 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2659 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2660 		    0, 0, stcb->asoc.context, 0, 0, 0,
2661 		    m_notify);
2662 		if (control != NULL) {
2663 			control->length = SCTP_BUF_LEN(m_notify);
2664 			/* not that we need this */
2665 			control->tail_mbuf = m_notify;
2666 			control->spec_flags = M_NOTIFICATION;
2667 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2668 			    control,
2669 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2670 			    so_locked);
2671 		} else {
2672 			sctp_m_freem(m_notify);
2673 		}
2674 	}
2675 	/*
2676 	 * For 1-to-1 style sockets, we send up and error when an ABORT
2677 	 * comes in.
2678 	 */
2679 set_error:
2680 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2681 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2682 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2683 		if (from_peer) {
2684 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2685 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2686 				stcb->sctp_socket->so_error = ECONNREFUSED;
2687 			} else {
2688 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2689 				stcb->sctp_socket->so_error = ECONNRESET;
2690 			}
2691 		} else {
2692 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2693 			stcb->sctp_socket->so_error = ECONNABORTED;
2694 		}
2695 	}
2696 	/* Wake ANY sleepers */
2697 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2698 	so = SCTP_INP_SO(stcb->sctp_ep);
2699 	if (!so_locked) {
2700 		atomic_add_int(&stcb->asoc.refcnt, 1);
2701 		SCTP_TCB_UNLOCK(stcb);
2702 		SCTP_SOCKET_LOCK(so, 1);
2703 		SCTP_TCB_LOCK(stcb);
2704 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2705 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2706 			SCTP_SOCKET_UNLOCK(so, 1);
2707 			return;
2708 		}
2709 	}
2710 #endif
2711 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2712 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2713 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2714 		socantrcvmore(stcb->sctp_socket);
2715 	}
2716 	sorwakeup(stcb->sctp_socket);
2717 	sowwakeup(stcb->sctp_socket);
2718 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2719 	if (!so_locked) {
2720 		SCTP_SOCKET_UNLOCK(so, 1);
2721 	}
2722 #endif
2723 }
2724 
2725 static void
2726 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2727     struct sockaddr *sa, uint32_t error)
2728 {
2729 	struct mbuf *m_notify;
2730 	struct sctp_paddr_change *spc;
2731 	struct sctp_queued_to_read *control;
2732 
2733 	if ((stcb == NULL) ||
2734 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2735 		/* event not enabled */
2736 		return;
2737 	}
2738 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2739 	if (m_notify == NULL)
2740 		return;
2741 	SCTP_BUF_LEN(m_notify) = 0;
2742 	spc = mtod(m_notify, struct sctp_paddr_change *);
2743 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2744 	spc->spc_flags = 0;
2745 	spc->spc_length = sizeof(struct sctp_paddr_change);
2746 	switch (sa->sa_family) {
2747 #ifdef INET
2748 	case AF_INET:
2749 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2750 		break;
2751 #endif
2752 #ifdef INET6
2753 	case AF_INET6:
2754 		{
2755 			struct sockaddr_in6 *sin6;
2756 
2757 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2758 
2759 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2760 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2761 				if (sin6->sin6_scope_id == 0) {
2762 					/* recover scope_id for user */
2763 					(void)sa6_recoverscope(sin6);
2764 				} else {
2765 					/* clear embedded scope_id for user */
2766 					in6_clearscope(&sin6->sin6_addr);
2767 				}
2768 			}
2769 			break;
2770 		}
2771 #endif
2772 	default:
2773 		/* TSNH */
2774 		break;
2775 	}
2776 	spc->spc_state = state;
2777 	spc->spc_error = error;
2778 	spc->spc_assoc_id = sctp_get_associd(stcb);
2779 
2780 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2781 	SCTP_BUF_NEXT(m_notify) = NULL;
2782 
2783 	/* append to socket */
2784 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2785 	    0, 0, stcb->asoc.context, 0, 0, 0,
2786 	    m_notify);
2787 	if (control == NULL) {
2788 		/* no memory */
2789 		sctp_m_freem(m_notify);
2790 		return;
2791 	}
2792 	control->length = SCTP_BUF_LEN(m_notify);
2793 	control->spec_flags = M_NOTIFICATION;
2794 	/* not that we need this */
2795 	control->tail_mbuf = m_notify;
2796 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2797 	    control,
2798 	    &stcb->sctp_socket->so_rcv, 1,
2799 	    SCTP_READ_LOCK_NOT_HELD,
2800 	    SCTP_SO_NOT_LOCKED);
2801 }
2802 
2803 
2804 static void
2805 sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
2806     struct sctp_tmit_chunk *chk, int so_locked
2807 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2808     SCTP_UNUSED
2809 #endif
2810 )
2811 {
2812 	struct mbuf *m_notify;
2813 	struct sctp_send_failed *ssf;
2814 	struct sctp_send_failed_event *ssfe;
2815 	struct sctp_queued_to_read *control;
2816 	int length;
2817 
2818 	if ((stcb == NULL) ||
2819 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
2820 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
2821 		/* event not enabled */
2822 		return;
2823 	}
2824 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2825 		length = sizeof(struct sctp_send_failed_event);
2826 	} else {
2827 		length = sizeof(struct sctp_send_failed);
2828 	}
2829 	m_notify = sctp_get_mbuf_for_msg(length, 0, M_DONTWAIT, 1, MT_DATA);
2830 	if (m_notify == NULL)
2831 		/* no space left */
2832 		return;
2833 	length += chk->send_size;
2834 	length -= sizeof(struct sctp_data_chunk);
2835 	SCTP_BUF_LEN(m_notify) = 0;
2836 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
2837 		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
2838 		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
2839 		if (sent) {
2840 			ssfe->ssfe_flags = SCTP_DATA_SENT;
2841 		} else {
2842 			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
2843 		}
2844 		ssfe->ssfe_length = length;
2845 		ssfe->ssfe_error = error;
2846 		/* not exactly what the user sent in, but should be close :) */
2847 		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
2848 		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
2849 		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
2850 		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
2851 		ssfe->ssfe_info.snd_context = chk->rec.data.context;
2852 		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
2853 		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
2854 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
2855 	} else {
2856 		ssf = mtod(m_notify, struct sctp_send_failed *);
2857 		ssf->ssf_type = SCTP_SEND_FAILED;
2858 		if (sent) {
2859 			ssf->ssf_flags = SCTP_DATA_SENT;
2860 		} else {
2861 			ssf->ssf_flags = SCTP_DATA_UNSENT;
2862 		}
2863 		ssf->ssf_length = length;
2864 		ssf->ssf_error = error;
2865 		/* not exactly what the user sent in, but should be close :) */
2866 		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
2867 		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
2868 		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
2869 		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
2870 		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
2871 		ssf->ssf_info.sinfo_context = chk->rec.data.context;
2872 		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
2873 		ssf->ssf_assoc_id = sctp_get_associd(stcb);
2874 		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
2875 	}
2876 	if (chk->data) {
2877 		/*
2878 		 * trim off the sctp chunk header(it should be there)
2879 		 */
2880 		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
2881 			m_adj(chk->data, sizeof(struct sctp_data_chunk));
2882 			sctp_mbuf_crush(chk->data);
2883 			chk->send_size -= sizeof(struct sctp_data_chunk);
2884 		}
2885 	}
2886 	SCTP_BUF_NEXT(m_notify) = chk->data;
2887 	/* Steal off the mbuf */
2888 	chk->data = NULL;
2889 	/*
2890 	 * For this case, we check the actual socket buffer, since the assoc
2891 	 * is going away we don't want to overfill the socket buffer for a
2892 	 * non-reader
2893 	 */
2894 	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
2895 		sctp_m_freem(m_notify);
2896 		return;
2897 	}
2898 	/* append to socket */
2899 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2900 	    0, 0, stcb->asoc.context, 0, 0, 0,
2901 	    m_notify);
2902 	if (control == NULL) {
2903 		/* no memory */
2904 		sctp_m_freem(m_notify);
2905 		return;
2906 	}
2907 	control->spec_flags = M_NOTIFICATION;
2908 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2909 	    control,
2910 	    &stcb->sctp_socket->so_rcv, 1,
2911 	    SCTP_READ_LOCK_NOT_HELD,
2912 	    so_locked);
2913 }
2914 
2915 
/*
 * Queue an SCTP_SEND_FAILED (or SCTP_SEND_FAILED_EVENT) notification for a
 * message that was still pending on a stream output queue (never put on the
 * wire) when the association failed.  On success this consumes sp->data by
 * chaining it onto the notification mbuf; the caller must not free it.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/*
	 * The new-style event (RECVNSENDFAILEVNT) and the deprecated one use
	 * different structures; size the mbuf for whichever will be built.
	 */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	/*
	 * The length reported to the user covers the notification header
	 * plus the unsent user data that is chained on below.
	 */
	length += sp->length;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			/* part of the message already left this queue */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = sp->strseq;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3015 
3016 
3017 
3018 static void
3019 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3020 {
3021 	struct mbuf *m_notify;
3022 	struct sctp_adaptation_event *sai;
3023 	struct sctp_queued_to_read *control;
3024 
3025 	if ((stcb == NULL) ||
3026 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3027 		/* event not enabled */
3028 		return;
3029 	}
3030 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3031 	if (m_notify == NULL)
3032 		/* no space left */
3033 		return;
3034 	SCTP_BUF_LEN(m_notify) = 0;
3035 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3036 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3037 	sai->sai_flags = 0;
3038 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3039 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3040 	sai->sai_assoc_id = sctp_get_associd(stcb);
3041 
3042 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3043 	SCTP_BUF_NEXT(m_notify) = NULL;
3044 
3045 	/* append to socket */
3046 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3047 	    0, 0, stcb->asoc.context, 0, 0, 0,
3048 	    m_notify);
3049 	if (control == NULL) {
3050 		/* no memory */
3051 		sctp_m_freem(m_notify);
3052 		return;
3053 	}
3054 	control->length = SCTP_BUF_LEN(m_notify);
3055 	control->spec_flags = M_NOTIFICATION;
3056 	/* not that we need this */
3057 	control->tail_mbuf = m_notify;
3058 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3059 	    control,
3060 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3061 }
3062 
3063 /* This always must be called with the read-queue LOCKED in the INP */
3064 static void
3065 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3066     uint32_t val, int so_locked
3067 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3068     SCTP_UNUSED
3069 #endif
3070 )
3071 {
3072 	struct mbuf *m_notify;
3073 	struct sctp_pdapi_event *pdapi;
3074 	struct sctp_queued_to_read *control;
3075 	struct sockbuf *sb;
3076 
3077 	if ((stcb == NULL) ||
3078 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3079 		/* event not enabled */
3080 		return;
3081 	}
3082 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3083 		return;
3084 	}
3085 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3086 	if (m_notify == NULL)
3087 		/* no space left */
3088 		return;
3089 	SCTP_BUF_LEN(m_notify) = 0;
3090 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3091 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3092 	pdapi->pdapi_flags = 0;
3093 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3094 	pdapi->pdapi_indication = error;
3095 	pdapi->pdapi_stream = (val >> 16);
3096 	pdapi->pdapi_seq = (val & 0x0000ffff);
3097 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3098 
3099 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3100 	SCTP_BUF_NEXT(m_notify) = NULL;
3101 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3102 	    0, 0, stcb->asoc.context, 0, 0, 0,
3103 	    m_notify);
3104 	if (control == NULL) {
3105 		/* no memory */
3106 		sctp_m_freem(m_notify);
3107 		return;
3108 	}
3109 	control->spec_flags = M_NOTIFICATION;
3110 	control->length = SCTP_BUF_LEN(m_notify);
3111 	/* not that we need this */
3112 	control->tail_mbuf = m_notify;
3113 	control->held_length = 0;
3114 	control->length = 0;
3115 	sb = &stcb->sctp_socket->so_rcv;
3116 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3117 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3118 	}
3119 	sctp_sballoc(stcb, sb, m_notify);
3120 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3121 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3122 	}
3123 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3124 	control->end_added = 1;
3125 	if (stcb->asoc.control_pdapi)
3126 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3127 	else {
3128 		/* we really should not see this case */
3129 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3130 	}
3131 	if (stcb->sctp_ep && stcb->sctp_socket) {
3132 		/* This should always be the case */
3133 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3134 		struct socket *so;
3135 
3136 		so = SCTP_INP_SO(stcb->sctp_ep);
3137 		if (!so_locked) {
3138 			atomic_add_int(&stcb->asoc.refcnt, 1);
3139 			SCTP_TCB_UNLOCK(stcb);
3140 			SCTP_SOCKET_LOCK(so, 1);
3141 			SCTP_TCB_LOCK(stcb);
3142 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3143 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3144 				SCTP_SOCKET_UNLOCK(so, 1);
3145 				return;
3146 			}
3147 		}
3148 #endif
3149 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3150 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3151 		if (!so_locked) {
3152 			SCTP_SOCKET_UNLOCK(so, 1);
3153 		}
3154 #endif
3155 	}
3156 }
3157 
/*
 * Notify the user that the peer has sent a SHUTDOWN.  For TCP-model (and
 * connected UDP-model) sockets this first marks the socket as unable to
 * send, then queues an SCTP_SHUTDOWN_EVENT if the user subscribed to it.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/* must take the socket lock before the TCB lock is re-taken */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3225 
3226 static void
3227 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3228     int so_locked
3229 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3230     SCTP_UNUSED
3231 #endif
3232 )
3233 {
3234 	struct mbuf *m_notify;
3235 	struct sctp_sender_dry_event *event;
3236 	struct sctp_queued_to_read *control;
3237 
3238 	if ((stcb == NULL) ||
3239 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3240 		/* event not enabled */
3241 		return;
3242 	}
3243 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3244 	if (m_notify == NULL) {
3245 		/* no space left */
3246 		return;
3247 	}
3248 	SCTP_BUF_LEN(m_notify) = 0;
3249 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3250 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3251 	event->sender_dry_flags = 0;
3252 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3253 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3254 
3255 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3256 	SCTP_BUF_NEXT(m_notify) = NULL;
3257 
3258 	/* append to socket */
3259 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3260 	    0, 0, stcb->asoc.context, 0, 0, 0,
3261 	    m_notify);
3262 	if (control == NULL) {
3263 		/* no memory */
3264 		sctp_m_freem(m_notify);
3265 		return;
3266 	}
3267 	control->length = SCTP_BUF_LEN(m_notify);
3268 	control->spec_flags = M_NOTIFICATION;
3269 	/* not that we need this */
3270 	control->tail_mbuf = m_notify;
3271 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3272 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3273 }
3274 
3275 
3276 void
3277 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3278 {
3279 	struct mbuf *m_notify;
3280 	struct sctp_queued_to_read *control;
3281 	struct sctp_stream_change_event *stradd;
3282 	int len;
3283 
3284 	if ((stcb == NULL) ||
3285 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3286 		/* event not enabled */
3287 		return;
3288 	}
3289 	if ((stcb->asoc.peer_req_out) && flag) {
3290 		/* Peer made the request, don't tell the local user */
3291 		stcb->asoc.peer_req_out = 0;
3292 		return;
3293 	}
3294 	stcb->asoc.peer_req_out = 0;
3295 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3296 	if (m_notify == NULL)
3297 		/* no space left */
3298 		return;
3299 	SCTP_BUF_LEN(m_notify) = 0;
3300 	len = sizeof(struct sctp_stream_change_event);
3301 	if (len > M_TRAILINGSPACE(m_notify)) {
3302 		/* never enough room */
3303 		sctp_m_freem(m_notify);
3304 		return;
3305 	}
3306 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3307 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3308 	stradd->strchange_flags = flag;
3309 	stradd->strchange_length = len;
3310 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3311 	stradd->strchange_instrms = numberin;
3312 	stradd->strchange_outstrms = numberout;
3313 	SCTP_BUF_LEN(m_notify) = len;
3314 	SCTP_BUF_NEXT(m_notify) = NULL;
3315 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3316 		/* no space */
3317 		sctp_m_freem(m_notify);
3318 		return;
3319 	}
3320 	/* append to socket */
3321 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3322 	    0, 0, stcb->asoc.context, 0, 0, 0,
3323 	    m_notify);
3324 	if (control == NULL) {
3325 		/* no memory */
3326 		sctp_m_freem(m_notify);
3327 		return;
3328 	}
3329 	control->spec_flags = M_NOTIFICATION;
3330 	control->length = SCTP_BUF_LEN(m_notify);
3331 	/* not that we need this */
3332 	control->tail_mbuf = m_notify;
3333 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3334 	    control,
3335 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3336 }
3337 
3338 void
3339 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3340 {
3341 	struct mbuf *m_notify;
3342 	struct sctp_queued_to_read *control;
3343 	struct sctp_assoc_reset_event *strasoc;
3344 	int len;
3345 
3346 	if ((stcb == NULL) ||
3347 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3348 		/* event not enabled */
3349 		return;
3350 	}
3351 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3352 	if (m_notify == NULL)
3353 		/* no space left */
3354 		return;
3355 	SCTP_BUF_LEN(m_notify) = 0;
3356 	len = sizeof(struct sctp_assoc_reset_event);
3357 	if (len > M_TRAILINGSPACE(m_notify)) {
3358 		/* never enough room */
3359 		sctp_m_freem(m_notify);
3360 		return;
3361 	}
3362 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3363 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3364 	strasoc->assocreset_flags = flag;
3365 	strasoc->assocreset_length = len;
3366 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3367 	strasoc->assocreset_local_tsn = sending_tsn;
3368 	strasoc->assocreset_remote_tsn = recv_tsn;
3369 	SCTP_BUF_LEN(m_notify) = len;
3370 	SCTP_BUF_NEXT(m_notify) = NULL;
3371 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3372 		/* no space */
3373 		sctp_m_freem(m_notify);
3374 		return;
3375 	}
3376 	/* append to socket */
3377 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3378 	    0, 0, stcb->asoc.context, 0, 0, 0,
3379 	    m_notify);
3380 	if (control == NULL) {
3381 		/* no memory */
3382 		sctp_m_freem(m_notify);
3383 		return;
3384 	}
3385 	control->spec_flags = M_NOTIFICATION;
3386 	control->length = SCTP_BUF_LEN(m_notify);
3387 	/* not that we need this */
3388 	control->tail_mbuf = m_notify;
3389 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3390 	    control,
3391 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3392 }
3393 
3394 
3395 
3396 static void
3397 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3398     int number_entries, uint16_t * list, int flag)
3399 {
3400 	struct mbuf *m_notify;
3401 	struct sctp_queued_to_read *control;
3402 	struct sctp_stream_reset_event *strreset;
3403 	int len;
3404 
3405 	if ((stcb == NULL) ||
3406 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3407 		/* event not enabled */
3408 		return;
3409 	}
3410 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3411 	if (m_notify == NULL)
3412 		/* no space left */
3413 		return;
3414 	SCTP_BUF_LEN(m_notify) = 0;
3415 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3416 	if (len > M_TRAILINGSPACE(m_notify)) {
3417 		/* never enough room */
3418 		sctp_m_freem(m_notify);
3419 		return;
3420 	}
3421 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3422 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3423 	strreset->strreset_flags = flag;
3424 	strreset->strreset_length = len;
3425 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3426 	if (number_entries) {
3427 		int i;
3428 
3429 		for (i = 0; i < number_entries; i++) {
3430 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3431 		}
3432 	}
3433 	SCTP_BUF_LEN(m_notify) = len;
3434 	SCTP_BUF_NEXT(m_notify) = NULL;
3435 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3436 		/* no space */
3437 		sctp_m_freem(m_notify);
3438 		return;
3439 	}
3440 	/* append to socket */
3441 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3442 	    0, 0, stcb->asoc.context, 0, 0, 0,
3443 	    m_notify);
3444 	if (control == NULL) {
3445 		/* no memory */
3446 		sctp_m_freem(m_notify);
3447 		return;
3448 	}
3449 	control->spec_flags = M_NOTIFICATION;
3450 	control->length = SCTP_BUF_LEN(m_notify);
3451 	/* not that we need this */
3452 	control->tail_mbuf = m_notify;
3453 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3454 	    control,
3455 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3456 }
3457 
3458 
3459 static void
3460 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3461 {
3462 	struct mbuf *m_notify;
3463 	struct sctp_remote_error *sre;
3464 	struct sctp_queued_to_read *control;
3465 	size_t notif_len, chunk_len;
3466 
3467 	if ((stcb == NULL) ||
3468 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3469 		return;
3470 	}
3471 	if (chunk != NULL) {
3472 		chunk_len = htons(chunk->ch.chunk_length);
3473 	} else {
3474 		chunk_len = 0;
3475 	}
3476 	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
3477 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
3478 	if (m_notify == NULL) {
3479 		/* Retry with smaller value. */
3480 		notif_len = sizeof(struct sctp_remote_error);
3481 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
3482 		if (m_notify == NULL) {
3483 			return;
3484 		}
3485 	}
3486 	SCTP_BUF_NEXT(m_notify) = NULL;
3487 	sre = mtod(m_notify, struct sctp_remote_error *);
3488 	sre->sre_type = SCTP_REMOTE_ERROR;
3489 	sre->sre_flags = 0;
3490 	sre->sre_length = sizeof(struct sctp_remote_error);
3491 	sre->sre_error = error;
3492 	sre->sre_assoc_id = sctp_get_associd(stcb);
3493 	if (notif_len > sizeof(struct sctp_remote_error)) {
3494 		memcpy(sre->sre_data, chunk, chunk_len);
3495 		sre->sre_length += chunk_len;
3496 	}
3497 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3498 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3499 	    0, 0, stcb->asoc.context, 0, 0, 0,
3500 	    m_notify);
3501 	if (control != NULL) {
3502 		control->length = SCTP_BUF_LEN(m_notify);
3503 		/* not that we need this */
3504 		control->tail_mbuf = m_notify;
3505 		control->spec_flags = M_NOTIFICATION;
3506 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3507 		    control,
3508 		    &stcb->sctp_socket->so_rcv, 1,
3509 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3510 	} else {
3511 		sctp_m_freem(m_notify);
3512 	}
3513 }
3514 
3515 
/*
 * Central dispatcher for upper-layer (user) notifications.  Validates that
 * the socket is still usable, suppresses interface events while the
 * association is still being set up, then routes 'notification' to the
 * matching sctp_notify_*() helper.  'data' is a notification-specific
 * payload (e.g. a struct sctp_nets *, a chunk pointer, or a stream list).
 */
void
sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	if ((stcb == NULL) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		/* If the socket is gone we are out of here */
		return;
	}
	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
		return;
	}
	/* NOTE(review): the "stcb &&" below is redundant (checked above). */
	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
			/* Don't report these in front states */
			return;
		}
	}
	switch (notification) {
	case SCTP_NOTIFY_ASSOC_UP:
		/* COMM_UP is reported only once per association */
		if (stcb->asoc.assoc_up_sent == 0) {
			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
			stcb->asoc.assoc_up_sent = 1;
		}
		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
			sctp_notify_adaptation_layer(stcb);
		}
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_DOWN:
		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
		break;
	case SCTP_NOTIFY_INTERFACE_DOWN:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_UP:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
		{
			struct sctp_nets *net;

			net = (struct sctp_nets *)data;
			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
			    (struct sockaddr *)&net->ro._l_addr, error);
			break;
		}
	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
		/* message failed while still on a stream output queue */
		sctp_notify_send_failed2(stcb, error,
		    (struct sctp_stream_queue_pending *)data, so_locked);
		break;
	case SCTP_NOTIFY_SENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 1, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	case SCTP_NOTIFY_UNSENT_DG_FAIL:
		sctp_notify_send_failed(stcb, 0, error,
		    (struct sctp_tmit_chunk *)data, so_locked);
		break;
	/* constant name carries a historical typo; it is defined elsewhere */
	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
		{
			uint32_t val;

			val = *((uint32_t *) data);

			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
			break;
		}
	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
		/* CANT_STR_ASSOC if the association never got established */
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
		} else {
			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
		}
		break;
	case SCTP_NOTIFY_ASSOC_RESTART:
		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
		if (stcb->asoc.peer_supports_auth == 0) {
			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
			    NULL, so_locked);
		}
		break;
	case SCTP_NOTIFY_STR_RESET_SEND:
		/* for stream reset cases 'error' carries the entry count */
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
		break;
	case SCTP_NOTIFY_STR_RESET_RECV:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
		break;
	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
		break;
	case SCTP_NOTIFY_ASCONF_ADD_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_DELETE_IP:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
		    error);
		break;
	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
		    error);
		break;
	case SCTP_NOTIFY_PEER_SHUTDOWN:
		sctp_notify_shutdown_event(stcb);
		break;
	case SCTP_NOTIFY_AUTH_NEW_KEY:
		/* for AUTH cases 'data' carries the key number, not a pointer */
		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_AUTH_FREE_KEY:
		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_NO_PEER_AUTH:
		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
		    (uint16_t) (uintptr_t) data,
		    so_locked);
		break;
	case SCTP_NOTIFY_SENDER_DRY:
		sctp_notify_sender_dry_event(stcb, so_locked);
		break;
	case SCTP_NOTIFY_REMOTE_ERROR:
		sctp_notify_remote_error(stcb, error, data);
		break;
	default:
		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
		    __FUNCTION__, notification, notification);
		break;
	}			/* end switch */
}
3695 
/*
 * Fail every queued outbound message on the association: the sent queue,
 * the pending send queue, and each stream's output queue.  Each message
 * generates a send-failed notification (if subscribed) and is then freed.
 * Pass holds_lock != 0 when the caller already holds the TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			/* re-check: the notification may have consumed chk->data */
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				/* notification normally steals sp->data; free if not */
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3791 
3792 void
3793 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3794     struct sctp_abort_chunk *abort, int so_locked
3795 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3796     SCTP_UNUSED
3797 #endif
3798 )
3799 {
3800 	if (stcb == NULL) {
3801 		return;
3802 	}
3803 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3804 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3805 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3806 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3807 	}
3808 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3809 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3810 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3811 		return;
3812 	}
3813 	/* Tell them we lost the asoc */
3814 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3815 	if (from_peer) {
3816 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3817 	} else {
3818 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3819 	}
3820 }
3821 
/*
 * Abort an association: notify the user (if a TCB exists), transmit an
 * ABORT chunk built from 'op_err' back to the peer, and free the TCB.
 * Also used TCB-less to reply to an out-of-the-blue packet (stcb == NULL),
 * in which case only the ABORT is sent, with vtag 0.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    use_mflowid, mflowid,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* socket lock must be taken before re-acquiring the TCB lock */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3870 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Dump the per-association inbound and outbound TSN circular logs for
 * debugging.  Each log is printed in order: the wrapped tail first
 * (tsn_*_at .. end of array), then the head (0 .. tsn_*_at).
 *
 * NOTE(review): the body is guarded by "NOSIY_PRINTS", which looks like a
 * misspelling of "NOISY_PRINTS" -- as written this function compiles to a
 * no-op unless that exact (misspelled) symbol is defined.  Confirm intent
 * before renaming.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* log wrapped: print the older entries beyond the cursor first */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	/*
	 * No goto here (unlike the inbound side); harmless, since both
	 * loops below are skipped when the counters are zero.
	 */
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3934 
/*
 * Locally abort an existing association: notify the ULP (unless the socket
 * is gone), send an ABORT chunk to the peer, update statistics, and free
 * the TCB.  When 'stcb' is NULL and the socket is already gone with no
 * remaining associations, the inpcb itself is freed instead.
 * 'so_locked' tells the platform-specific paths whether the caller already
 * holds the socket lock.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last association already gone: free the endpoint */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* established associations also decrement the gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Acquire the socket lock (needed by sctp_free_assoc on these
	 * platforms): take a ref, drop the TCB lock, lock socket-then-TCB.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3995 
/*
 * Handle a packet for which no association exists ("out of the blue").
 * Walks the chunk list to decide the response:
 *  - COOKIE-ECHO, PACKET-DROPPED, ABORT, SHUTDOWN-COMPLETE: respond with
 *    nothing at all.
 *  - SHUTDOWN-ACK: answer with SHUTDOWN-COMPLETE.
 *  - otherwise: send an ABORT, unless suppressed by the sctp_blackhole
 *    sysctl (1 = suppress only for packets containing an INIT,
 *    2 = suppress always).
 * May also free the inpcb if the socket is gone and no associations remain.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;
	int contains_init_chunk;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			/* socket gone and nothing left: free the endpoint */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	contains_init_chunk = 0;
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_INIT:
			contains_init_chunk = 1;
			break;
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(src, dst, sh,
			    use_mflowid, mflowid,
			    vrf_id, port);
			return;
		default:
			break;
		}
		/* advance to the next 4-byte-aligned chunk boundary */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
	    (contains_init_chunk == 0))) {
		sctp_send_abort(m, iphlen, src, dst, sh, 0, NULL,
		    use_mflowid, mflowid,
		    vrf_id, port);
	}
}
4063 
4064 /*
4065  * check the inbound datagram to make sure there is not an abort inside it,
4066  * if there is return 1, else return 0.
4067  */
4068 int
4069 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4070 {
4071 	struct sctp_chunkhdr *ch;
4072 	struct sctp_init_chunk *init_chk, chunk_buf;
4073 	int offset;
4074 	unsigned int chk_length;
4075 
4076 	offset = iphlen + sizeof(struct sctphdr);
4077 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4078 	    (uint8_t *) & chunk_buf);
4079 	while (ch != NULL) {
4080 		chk_length = ntohs(ch->chunk_length);
4081 		if (chk_length < sizeof(*ch)) {
4082 			/* packet is probably corrupt */
4083 			break;
4084 		}
4085 		/* we seem to be ok, is it an abort? */
4086 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4087 			/* yep, tell them */
4088 			return (1);
4089 		}
4090 		if (ch->chunk_type == SCTP_INITIATION) {
4091 			/* need to update the Vtag */
4092 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4093 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4094 			if (init_chk != NULL) {
4095 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4096 			}
4097 		}
4098 		/* Nope, move to the next chunk */
4099 		offset += SCTP_SIZE32(chk_length);
4100 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4101 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4102 	}
4103 	return (0);
4104 }
4105 
4106 /*
4107  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4108  * set (i.e. it's 0) so, create this function to compare link local scopes
4109  */
4110 #ifdef INET6
4111 uint32_t
4112 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4113 {
4114 	struct sockaddr_in6 a, b;
4115 
4116 	/* save copies */
4117 	a = *addr1;
4118 	b = *addr2;
4119 
4120 	if (a.sin6_scope_id == 0)
4121 		if (sa6_recoverscope(&a)) {
4122 			/* can't get scope, so can't match */
4123 			return (0);
4124 		}
4125 	if (b.sin6_scope_id == 0)
4126 		if (sa6_recoverscope(&b)) {
4127 			/* can't get scope, so can't match */
4128 			return (0);
4129 		}
4130 	if (a.sin6_scope_id != b.sin6_scope_id)
4131 		return (0);
4132 
4133 	return (1);
4134 }
4135 
4136 /*
4137  * returns a sockaddr_in6 with embedded scope recovered and removed
4138  */
4139 struct sockaddr_in6 *
4140 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4141 {
4142 	/* check and strip embedded scope junk */
4143 	if (addr->sin6_family == AF_INET6) {
4144 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4145 			if (addr->sin6_scope_id == 0) {
4146 				*store = *addr;
4147 				if (!sa6_recoverscope(store)) {
4148 					/* use the recovered scope */
4149 					addr = store;
4150 				}
4151 			} else {
4152 				/* else, return the original "to" addr */
4153 				in6_clearscope(&addr->sin6_addr);
4154 			}
4155 		}
4156 	}
4157 	return (addr);
4158 }
4159 
4160 #endif
4161 
4162 /*
4163  * are the two addresses the same?  currently a "scopeless" check returns: 1
4164  * if same, 0 if not
4165  */
4166 int
4167 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4168 {
4169 
4170 	/* must be valid */
4171 	if (sa1 == NULL || sa2 == NULL)
4172 		return (0);
4173 
4174 	/* must be the same family */
4175 	if (sa1->sa_family != sa2->sa_family)
4176 		return (0);
4177 
4178 	switch (sa1->sa_family) {
4179 #ifdef INET6
4180 	case AF_INET6:
4181 		{
4182 			/* IPv6 addresses */
4183 			struct sockaddr_in6 *sin6_1, *sin6_2;
4184 
4185 			sin6_1 = (struct sockaddr_in6 *)sa1;
4186 			sin6_2 = (struct sockaddr_in6 *)sa2;
4187 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4188 			    sin6_2));
4189 		}
4190 #endif
4191 #ifdef INET
4192 	case AF_INET:
4193 		{
4194 			/* IPv4 addresses */
4195 			struct sockaddr_in *sin_1, *sin_2;
4196 
4197 			sin_1 = (struct sockaddr_in *)sa1;
4198 			sin_2 = (struct sockaddr_in *)sa2;
4199 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4200 		}
4201 #endif
4202 	default:
4203 		/* we don't do these... */
4204 		return (0);
4205 	}
4206 }
4207 
4208 void
4209 sctp_print_address(struct sockaddr *sa)
4210 {
4211 #ifdef INET6
4212 	char ip6buf[INET6_ADDRSTRLEN];
4213 
4214 #endif
4215 
4216 	switch (sa->sa_family) {
4217 #ifdef INET6
4218 	case AF_INET6:
4219 		{
4220 			struct sockaddr_in6 *sin6;
4221 
4222 			sin6 = (struct sockaddr_in6 *)sa;
4223 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4224 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4225 			    ntohs(sin6->sin6_port),
4226 			    sin6->sin6_scope_id);
4227 			break;
4228 		}
4229 #endif
4230 #ifdef INET
4231 	case AF_INET:
4232 		{
4233 			struct sockaddr_in *sin;
4234 			unsigned char *p;
4235 
4236 			sin = (struct sockaddr_in *)sa;
4237 			p = (unsigned char *)&sin->sin_addr;
4238 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4239 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4240 			break;
4241 		}
4242 #endif
4243 	default:
4244 		SCTP_PRINTF("?\n");
4245 		break;
4246 	}
4247 }
4248 
/*
 * Move every read-queue control structure belonging to 'stcb' from the old
 * endpoint's socket to the new one (peeloff/accept path).  Socket-buffer
 * accounting is debited from the old socket and credited to the new one,
 * mbuf by mbuf.  If the old receive buffer cannot be sb-locked, the data
 * is left stranded on the old socket (see comment below).
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;	/* staging list between the two locks */
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* sb-lock the old receive buffer so no reader races with us */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* debit each mbuf from the old socket's accounting */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* credit each mbuf to the new socket's accounting */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4324 
/*
 * Queue a fully-built read-queue control structure on the endpoint's read
 * queue and charge its mbuf chain to the socket buffer 'sb', so select()
 * and friends see the data.  Zero-length mbufs are stripped from the
 * chain; if everything collapses away the control is freed instead of
 * being queued.  'end' marks the control complete (end_added).  Wakes the
 * socket reader (or posts a zero-copy event) once queued.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* nobody can read anymore: drop the data and the control */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications are not counted as received user messages */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* walk the chain: free empty mbufs, charge the rest to 'sb' */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* finally, let the reader know there is data */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/* hold a ref across the lock juggle */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* socket vanished while unlocked */
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4450 
4451 
/*
 * Append mbuf chain 'm' to an existing read-queue control (partial
 * delivery API or reassembly-queue append).  When 'sb' is non-NULL the
 * appended mbufs are also charged to that socket buffer.  'end' marks the
 * message complete; 'ctls_cumack' is recorded as the control's
 * sinfo_tsn/sinfo_cumtsn (see comment near the bottom).  Returns 0 on
 * success, -1 when there is nothing usable to append to.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* reader is gone; quietly succeed without appending */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* walk the chain: free empty mbufs, charge the rest to 'sb' */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* let the reader know there is (possibly more) data */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/* hold a ref across the lock juggle */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4601 
4602 
4603 
4604 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4605  *************ALTERNATE ROUTING CODE
4606  */
4607 
4608 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4609  *************ALTERNATE ROUTING CODE
4610  */
4611 
4612 struct mbuf *
4613 sctp_generate_invmanparam(int err)
4614 {
4615 	/* Return a MBUF with a invalid mandatory parameter */
4616 	struct mbuf *m;
4617 
4618 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4619 	if (m) {
4620 		struct sctp_paramhdr *ph;
4621 
4622 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4623 		ph = mtod(m, struct sctp_paramhdr *);
4624 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4625 		ph->param_type = htons(err);
4626 	}
4627 	return (m);
4628 }
4629 
#ifdef SCTP_MBCNT_LOGGING
/*
 * Release the buffer-space accounting held by chunk 'tp1' (mbcnt-logging
 * variant): decrement the association's chunk count and output-queue size
 * by the chunk's book_size, and -- for 1-to-1 style sockets -- the socket
 * send-buffer byte count as well.  All decrements clamp at zero rather
 * than underflowing.  No-op when the chunk holds no data.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero instead of letting the gauge underflow */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	/* only 1-to-1 (TCP-model) sockets track sb_cc per association */
	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}

#endif
4664 
/*
 * Release a PR-SCTP message starting at chunk 'tp1': mark every fragment
 * of the same message FORWARD_TSN_SKIP, free its data, notify the ULP
 * ((UN)SENT_DG_FAIL per 'sent'), and return the number of book_size bytes
 * released.  The message may be scattered across three places, handled in
 * order: (1) the sent queue (starting at tp1), (2) the send queue, and
 * (3) the stream-out queue -- in the last case a placeholder chunk
 * carrying the LAST-fragment bit may have to be fabricated so the peer's
 * cum-ack can advance.  Wakes the sender's socket if any space was freed.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint16_t stream = 0, seq = 0;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	/* remember the message identity so we can find its other fragments */
	stream = tp1->rec.data.stream_number;
	seq = tp1->rec.data.stream_seq;
	/* phase 1: walk the sent queue from tp1 until the last fragment */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				/* still counted in flight: remove it */
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			/* give the space back to the peer's receive window */
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			do_wakeup_routine = 1;
			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
				stcb->asoc.sent_queue_cnt_removeable--;
			}
		}
		tp1->sent = SCTP_FORWARD_TSN_SKIP;
		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
		    SCTP_DATA_NOT_FRAG) {
			/* not frag'ed we are done   */
			notdone = 0;
			foundeom = 1;
		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
			/* end of frag, we are done */
			notdone = 0;
			foundeom = 1;
		} else {
			/*
			 * Its a begin or middle piece, we must mark all of
			 * it
			 */
			notdone = 1;
			tp1 = TAILQ_NEXT(tp1, sctp_next);
		}
	} while (tp1 && notdone);
	if (foundeom == 0) {
		/*
		 * The multi-part message was scattered across the send and
		 * sent queue.
		 */
		/* phase 2: same message's fragments waiting on the send queue */
		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
			if ((tp1->rec.data.stream_number != stream) ||
			    (tp1->rec.data.stream_seq != seq)) {
				break;
			}
			/*
			 * save to chk in case we have some on stream out
			 * queue. If so and we have an un-transmitted one we
			 * don't have to fudge the TSN.
			 */
			chk = tp1;
			ret_sz += tp1->book_size;
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
			/* No flight involved here book the size to 0 */
			tp1->book_size = 0;
			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
				foundeom = 1;
			}
			do_wakeup_routine = 1;
			tp1->sent = SCTP_FORWARD_TSN_SKIP;
			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
			/*
			 * on to the sent queue so we can wait for it to be
			 * passed by.
			 */
			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
			    sctp_next);
			stcb->asoc.send_queue_cnt--;
			stcb->asoc.sent_queue_cnt++;
		}
	}
	if (foundeom == 0) {
		/*
		 * Still no eom found. That means there is stuff left on the
		 * stream out queue.. yuck.
		 */
		/* phase 3: the tail of the message never left the stream queue */
		strq = &stcb->asoc.strmout[stream];
		SCTP_TCB_SEND_LOCK(stcb);
		TAILQ_FOREACH(sp, &strq->outqueue, next) {
			/* FIXME: Shouldn't this be a serial number check? */
			if (sp->strseq > seq) {
				break;
			}
			/* Check if its our SEQ */
			if (sp->strseq == seq) {
				/* discard whatever the sender still adds to it */
				sp->discard_rest = 1;
				/*
				 * We may need to put a chunk on the queue
				 * that holds the TSN that would have been
				 * sent with the LAST bit.
				 */
				if (chk == NULL) {
					/* Yep, we have to */
					sctp_alloc_a_chunk(stcb, chk);
					if (chk == NULL) {
						/*
						 * we are hosed. All we can
						 * do is nothing.. which
						 * will cause an abort if
						 * the peer is paying
						 * attention.
						 */
						goto oh_well;
					}
					memset(chk, 0, sizeof(*chk));
					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
					chk->sent = SCTP_FORWARD_TSN_SKIP;
					chk->asoc = &stcb->asoc;
					chk->rec.data.stream_seq = sp->strseq;
					chk->rec.data.stream_number = sp->stream;
					chk->rec.data.payloadtype = sp->ppid;
					chk->rec.data.context = sp->context;
					chk->flags = sp->act_flags;
					if (sp->net)
						chk->whoTo = sp->net;
					else
						chk->whoTo = stcb->asoc.primary_destination;
					atomic_add_int(&chk->whoTo->ref_count, 1);
					/* consume the next TSN for the fabricated chunk */
					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
					stcb->asoc.pr_sctp_cnt++;
					chk->pr_sctp_on = 1;
					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
					stcb->asoc.sent_queue_cnt++;
					/*
					 * NOTE(review): pr_sctp_cnt is
					 * incremented twice in this branch
					 * (also a few lines above) -- looks
					 * like a double count; confirm
					 * against upstream history.
					 */
					stcb->asoc.pr_sctp_cnt++;
				} else {
					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
				}
		oh_well:
				if (sp->data) {
					/*
					 * Pull any data to free up the SB
					 * and allow sender to "add more"
					 * while we will throw away :-)
					 */
					sctp_free_spbufspace(stcb, &stcb->asoc,
					    sp);
					ret_sz += sp->length;
					do_wakeup_routine = 1;
					sp->some_taken = 1;
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
				break;
			}
		}		/* End tailq_foreach */
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
	if (do_wakeup_routine) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/* hold a ref across the lock juggle to keep the assoc alive */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* assoc was freed while we were unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (ret_sz);
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	return (ret_sz);
}
4877 
4878 /*
4879  * checks to see if the given address, sa, is one that is currently known by
4880  * the kernel note: can't distinguish the same address on multiple interfaces
4881  * and doesn't handle multiple addresses with different zone/scope id's note:
4882  * ifa_ifwithaddr() compares the entire sockaddr struct
4883  */
4884 struct sctp_ifa *
4885 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4886     int holds_lock)
4887 {
4888 	struct sctp_laddr *laddr;
4889 
4890 	if (holds_lock == 0) {
4891 		SCTP_INP_RLOCK(inp);
4892 	}
4893 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4894 		if (laddr->ifa == NULL)
4895 			continue;
4896 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4897 			continue;
4898 #ifdef INET
4899 		if (addr->sa_family == AF_INET) {
4900 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4901 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4902 				/* found him. */
4903 				if (holds_lock == 0) {
4904 					SCTP_INP_RUNLOCK(inp);
4905 				}
4906 				return (laddr->ifa);
4907 				break;
4908 			}
4909 		}
4910 #endif
4911 #ifdef INET6
4912 		if (addr->sa_family == AF_INET6) {
4913 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4914 			    &laddr->ifa->address.sin6)) {
4915 				/* found him. */
4916 				if (holds_lock == 0) {
4917 					SCTP_INP_RUNLOCK(inp);
4918 				}
4919 				return (laddr->ifa);
4920 				break;
4921 			}
4922 		}
4923 #endif
4924 	}
4925 	if (holds_lock == 0) {
4926 		SCTP_INP_RUNLOCK(inp);
4927 	}
4928 	return (NULL);
4929 }
4930 
/*
 * Compute the hash value used to index an address into the VRF address
 * hash table.  IPv4 folds the 32-bit address onto its upper 16 bits;
 * IPv6 sums the four 32-bit words of the address first, then folds.
 * Unknown address families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t hash_of_addr;

			sin6 = (struct sockaddr_in6 *)addr;
			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3]);
			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
			return (hash_of_addr);
		}
#endif
	default:
		break;
	}
	return (0);
}
4964 
4965 struct sctp_ifa *
4966 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4967 {
4968 	struct sctp_ifa *sctp_ifap;
4969 	struct sctp_vrf *vrf;
4970 	struct sctp_ifalist *hash_head;
4971 	uint32_t hash_of_addr;
4972 
4973 	if (holds_lock == 0)
4974 		SCTP_IPI_ADDR_RLOCK();
4975 
4976 	vrf = sctp_find_vrf(vrf_id);
4977 	if (vrf == NULL) {
4978 stage_right:
4979 		if (holds_lock == 0)
4980 			SCTP_IPI_ADDR_RUNLOCK();
4981 		return (NULL);
4982 	}
4983 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4984 
4985 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4986 	if (hash_head == NULL) {
4987 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4988 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4989 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4990 		sctp_print_address(addr);
4991 		SCTP_PRINTF("No such bucket for address\n");
4992 		if (holds_lock == 0)
4993 			SCTP_IPI_ADDR_RUNLOCK();
4994 
4995 		return (NULL);
4996 	}
4997 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4998 		if (sctp_ifap == NULL) {
4999 #ifdef INVARIANTS
5000 			panic("Huh LIST_FOREACH corrupt");
5001 			goto stage_right;
5002 #else
5003 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
5004 			goto stage_right;
5005 #endif
5006 		}
5007 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
5008 			continue;
5009 #ifdef INET
5010 		if (addr->sa_family == AF_INET) {
5011 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
5012 			    sctp_ifap->address.sin.sin_addr.s_addr) {
5013 				/* found him. */
5014 				if (holds_lock == 0)
5015 					SCTP_IPI_ADDR_RUNLOCK();
5016 				return (sctp_ifap);
5017 				break;
5018 			}
5019 		}
5020 #endif
5021 #ifdef INET6
5022 		if (addr->sa_family == AF_INET6) {
5023 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
5024 			    &sctp_ifap->address.sin6)) {
5025 				/* found him. */
5026 				if (holds_lock == 0)
5027 					SCTP_IPI_ADDR_RUNLOCK();
5028 				return (sctp_ifap);
5029 				break;
5030 			}
5031 		}
5032 #endif
5033 	}
5034 	if (holds_lock == 0)
5035 		SCTP_IPI_ADDR_RUNLOCK();
5036 	return (NULL);
5037 }
5038 
/*
 * Called after the user has pulled data off the socket: decide whether
 * the receive window has opened enough (by at least rwnd_req) to be
 * worth telling the peer via a window-update SACK, and send one if so.
 * 'freed_so_far' is folded into the tcb's running count and reset to 0.
 * 'hold_rlock' indicates the caller holds the INP read-queue lock; it
 * is temporarily dropped around the SACK send and re-taken on exit.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the assoc cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Pin the endpoint as well while we work. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed byte count into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Window opened by at least rwnd_req: send a window-update
		 * SACK.  Drop the read-queue lock first to respect lock
		 * ordering with the TCB lock.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock; the state may have changed. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-acquire the read-queue lock if the caller held it on entry. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Drop the assoc reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5120 
5121 int
5122 sctp_sorecvmsg(struct socket *so,
5123     struct uio *uio,
5124     struct mbuf **mp,
5125     struct sockaddr *from,
5126     int fromlen,
5127     int *msg_flags,
5128     struct sctp_sndrcvinfo *sinfo,
5129     int filling_sinfo)
5130 {
5131 	/*
5132 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5133 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5134 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5135 	 * On the way out we may send out any combination of:
5136 	 * MSG_NOTIFICATION MSG_EOR
5137 	 *
5138 	 */
5139 	struct sctp_inpcb *inp = NULL;
5140 	int my_len = 0;
5141 	int cp_len = 0, error = 0;
5142 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5143 	struct mbuf *m = NULL;
5144 	struct sctp_tcb *stcb = NULL;
5145 	int wakeup_read_socket = 0;
5146 	int freecnt_applied = 0;
5147 	int out_flags = 0, in_flags = 0;
5148 	int block_allowed = 1;
5149 	uint32_t freed_so_far = 0;
5150 	uint32_t copied_so_far = 0;
5151 	int in_eeor_mode = 0;
5152 	int no_rcv_needed = 0;
5153 	uint32_t rwnd_req = 0;
5154 	int hold_sblock = 0;
5155 	int hold_rlock = 0;
5156 	int slen = 0;
5157 	uint32_t held_length = 0;
5158 	int sockbuf_lock = 0;
5159 
5160 	if (uio == NULL) {
5161 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5162 		return (EINVAL);
5163 	}
5164 	if (msg_flags) {
5165 		in_flags = *msg_flags;
5166 		if (in_flags & MSG_PEEK)
5167 			SCTP_STAT_INCR(sctps_read_peeks);
5168 	} else {
5169 		in_flags = 0;
5170 	}
5171 	slen = uio->uio_resid;
5172 
5173 	/* Pull in and set up our int flags */
5174 	if (in_flags & MSG_OOB) {
5175 		/* Out of band's NOT supported */
5176 		return (EOPNOTSUPP);
5177 	}
5178 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5179 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5180 		return (EINVAL);
5181 	}
5182 	if ((in_flags & (MSG_DONTWAIT
5183 	    | MSG_NBIO
5184 	    )) ||
5185 	    SCTP_SO_IS_NBIO(so)) {
5186 		block_allowed = 0;
5187 	}
5188 	/* setup the endpoint */
5189 	inp = (struct sctp_inpcb *)so->so_pcb;
5190 	if (inp == NULL) {
5191 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5192 		return (EFAULT);
5193 	}
5194 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5195 	/* Must be at least a MTU's worth */
5196 	if (rwnd_req < SCTP_MIN_RWND)
5197 		rwnd_req = SCTP_MIN_RWND;
5198 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5199 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5200 		sctp_misc_ints(SCTP_SORECV_ENTER,
5201 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5202 	}
5203 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5204 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5205 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5206 	}
5207 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5208 	sockbuf_lock = 1;
5209 	if (error) {
5210 		goto release_unlocked;
5211 	}
5212 restart:
5213 
5214 
5215 restart_nosblocks:
5216 	if (hold_sblock == 0) {
5217 		SOCKBUF_LOCK(&so->so_rcv);
5218 		hold_sblock = 1;
5219 	}
5220 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5221 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5222 		goto out;
5223 	}
5224 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5225 		if (so->so_error) {
5226 			error = so->so_error;
5227 			if ((in_flags & MSG_PEEK) == 0)
5228 				so->so_error = 0;
5229 			goto out;
5230 		} else {
5231 			if (so->so_rcv.sb_cc == 0) {
5232 				/* indicate EOF */
5233 				error = 0;
5234 				goto out;
5235 			}
5236 		}
5237 	}
5238 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5239 		/* we need to wait for data */
5240 		if ((so->so_rcv.sb_cc == 0) &&
5241 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5242 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5243 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5244 				/*
5245 				 * For active open side clear flags for
5246 				 * re-use passive open is blocked by
5247 				 * connect.
5248 				 */
5249 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5250 					/*
5251 					 * You were aborted, passive side
5252 					 * always hits here
5253 					 */
5254 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5255 					error = ECONNRESET;
5256 				}
5257 				so->so_state &= ~(SS_ISCONNECTING |
5258 				    SS_ISDISCONNECTING |
5259 				    SS_ISCONFIRMING |
5260 				    SS_ISCONNECTED);
5261 				if (error == 0) {
5262 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5263 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5264 						error = ENOTCONN;
5265 					}
5266 				}
5267 				goto out;
5268 			}
5269 		}
5270 		error = sbwait(&so->so_rcv);
5271 		if (error) {
5272 			goto out;
5273 		}
5274 		held_length = 0;
5275 		goto restart_nosblocks;
5276 	} else if (so->so_rcv.sb_cc == 0) {
5277 		if (so->so_error) {
5278 			error = so->so_error;
5279 			if ((in_flags & MSG_PEEK) == 0)
5280 				so->so_error = 0;
5281 		} else {
5282 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5283 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5284 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5285 					/*
5286 					 * For active open side clear flags
5287 					 * for re-use passive open is
5288 					 * blocked by connect.
5289 					 */
5290 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5291 						/*
5292 						 * You were aborted, passive
5293 						 * side always hits here
5294 						 */
5295 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5296 						error = ECONNRESET;
5297 					}
5298 					so->so_state &= ~(SS_ISCONNECTING |
5299 					    SS_ISDISCONNECTING |
5300 					    SS_ISCONFIRMING |
5301 					    SS_ISCONNECTED);
5302 					if (error == 0) {
5303 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5304 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5305 							error = ENOTCONN;
5306 						}
5307 					}
5308 					goto out;
5309 				}
5310 			}
5311 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5312 			error = EWOULDBLOCK;
5313 		}
5314 		goto out;
5315 	}
5316 	if (hold_sblock == 1) {
5317 		SOCKBUF_UNLOCK(&so->so_rcv);
5318 		hold_sblock = 0;
5319 	}
5320 	/* we possibly have data we can read */
5321 	/* sa_ignore FREED_MEMORY */
5322 	control = TAILQ_FIRST(&inp->read_queue);
5323 	if (control == NULL) {
5324 		/*
5325 		 * This could be happening since the appender did the
5326 		 * increment but as not yet did the tailq insert onto the
5327 		 * read_queue
5328 		 */
5329 		if (hold_rlock == 0) {
5330 			SCTP_INP_READ_LOCK(inp);
5331 		}
5332 		control = TAILQ_FIRST(&inp->read_queue);
5333 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5334 #ifdef INVARIANTS
5335 			panic("Huh, its non zero and nothing on control?");
5336 #endif
5337 			so->so_rcv.sb_cc = 0;
5338 		}
5339 		SCTP_INP_READ_UNLOCK(inp);
5340 		hold_rlock = 0;
5341 		goto restart;
5342 	}
5343 	if ((control->length == 0) &&
5344 	    (control->do_not_ref_stcb)) {
5345 		/*
5346 		 * Clean up code for freeing assoc that left behind a
5347 		 * pdapi.. maybe a peer in EEOR that just closed after
5348 		 * sending and never indicated a EOR.
5349 		 */
5350 		if (hold_rlock == 0) {
5351 			hold_rlock = 1;
5352 			SCTP_INP_READ_LOCK(inp);
5353 		}
5354 		control->held_length = 0;
5355 		if (control->data) {
5356 			/* Hmm there is data here .. fix */
5357 			struct mbuf *m_tmp;
5358 			int cnt = 0;
5359 
5360 			m_tmp = control->data;
5361 			while (m_tmp) {
5362 				cnt += SCTP_BUF_LEN(m_tmp);
5363 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5364 					control->tail_mbuf = m_tmp;
5365 					control->end_added = 1;
5366 				}
5367 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5368 			}
5369 			control->length = cnt;
5370 		} else {
5371 			/* remove it */
5372 			TAILQ_REMOVE(&inp->read_queue, control, next);
5373 			/* Add back any hiddend data */
5374 			sctp_free_remote_addr(control->whoFrom);
5375 			sctp_free_a_readq(stcb, control);
5376 		}
5377 		if (hold_rlock) {
5378 			hold_rlock = 0;
5379 			SCTP_INP_READ_UNLOCK(inp);
5380 		}
5381 		goto restart;
5382 	}
5383 	if ((control->length == 0) &&
5384 	    (control->end_added == 1)) {
5385 		/*
5386 		 * Do we also need to check for (control->pdapi_aborted ==
5387 		 * 1)?
5388 		 */
5389 		if (hold_rlock == 0) {
5390 			hold_rlock = 1;
5391 			SCTP_INP_READ_LOCK(inp);
5392 		}
5393 		TAILQ_REMOVE(&inp->read_queue, control, next);
5394 		if (control->data) {
5395 #ifdef INVARIANTS
5396 			panic("control->data not null but control->length == 0");
5397 #else
5398 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5399 			sctp_m_freem(control->data);
5400 			control->data = NULL;
5401 #endif
5402 		}
5403 		if (control->aux_data) {
5404 			sctp_m_free(control->aux_data);
5405 			control->aux_data = NULL;
5406 		}
5407 		sctp_free_remote_addr(control->whoFrom);
5408 		sctp_free_a_readq(stcb, control);
5409 		if (hold_rlock) {
5410 			hold_rlock = 0;
5411 			SCTP_INP_READ_UNLOCK(inp);
5412 		}
5413 		goto restart;
5414 	}
5415 	if (control->length == 0) {
5416 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5417 		    (filling_sinfo)) {
5418 			/* find a more suitable one then this */
5419 			ctl = TAILQ_NEXT(control, next);
5420 			while (ctl) {
5421 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5422 				    (ctl->some_taken ||
5423 				    (ctl->spec_flags & M_NOTIFICATION) ||
5424 				    ((ctl->do_not_ref_stcb == 0) &&
5425 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5426 				    ) {
5427 					/*-
5428 					 * If we have a different TCB next, and there is data
5429 					 * present. If we have already taken some (pdapi), OR we can
5430 					 * ref the tcb and no delivery as started on this stream, we
5431 					 * take it. Note we allow a notification on a different
5432 					 * assoc to be delivered..
5433 					 */
5434 					control = ctl;
5435 					goto found_one;
5436 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5437 					    (ctl->length) &&
5438 					    ((ctl->some_taken) ||
5439 					    ((ctl->do_not_ref_stcb == 0) &&
5440 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5441 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5442 					/*-
5443 					 * If we have the same tcb, and there is data present, and we
5444 					 * have the strm interleave feature present. Then if we have
5445 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5446 					 * not started a delivery for this stream, we can take it.
5447 					 * Note we do NOT allow a notificaiton on the same assoc to
5448 					 * be delivered.
5449 					 */
5450 					control = ctl;
5451 					goto found_one;
5452 				}
5453 				ctl = TAILQ_NEXT(ctl, next);
5454 			}
5455 		}
5456 		/*
5457 		 * if we reach here, not suitable replacement is available
5458 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5459 		 * into the our held count, and its time to sleep again.
5460 		 */
5461 		held_length = so->so_rcv.sb_cc;
5462 		control->held_length = so->so_rcv.sb_cc;
5463 		goto restart;
5464 	}
5465 	/* Clear the held length since there is something to read */
5466 	control->held_length = 0;
5467 	if (hold_rlock) {
5468 		SCTP_INP_READ_UNLOCK(inp);
5469 		hold_rlock = 0;
5470 	}
5471 found_one:
5472 	/*
5473 	 * If we reach here, control has a some data for us to read off.
5474 	 * Note that stcb COULD be NULL.
5475 	 */
5476 	control->some_taken++;
5477 	if (hold_sblock) {
5478 		SOCKBUF_UNLOCK(&so->so_rcv);
5479 		hold_sblock = 0;
5480 	}
5481 	stcb = control->stcb;
5482 	if (stcb) {
5483 		if ((control->do_not_ref_stcb == 0) &&
5484 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5485 			if (freecnt_applied == 0)
5486 				stcb = NULL;
5487 		} else if (control->do_not_ref_stcb == 0) {
5488 			/* you can't free it on me please */
5489 			/*
5490 			 * The lock on the socket buffer protects us so the
5491 			 * free code will stop. But since we used the
5492 			 * socketbuf lock and the sender uses the tcb_lock
5493 			 * to increment, we need to use the atomic add to
5494 			 * the refcnt
5495 			 */
5496 			if (freecnt_applied) {
5497 #ifdef INVARIANTS
5498 				panic("refcnt already incremented");
5499 #else
5500 				SCTP_PRINTF("refcnt already incremented?\n");
5501 #endif
5502 			} else {
5503 				atomic_add_int(&stcb->asoc.refcnt, 1);
5504 				freecnt_applied = 1;
5505 			}
5506 			/*
5507 			 * Setup to remember how much we have not yet told
5508 			 * the peer our rwnd has opened up. Note we grab the
5509 			 * value from the tcb from last time. Note too that
5510 			 * sack sending clears this when a sack is sent,
5511 			 * which is fine. Once we hit the rwnd_req, we then
5512 			 * will go to the sctp_user_rcvd() that will not
5513 			 * lock until it KNOWs it MUST send a WUP-SACK.
5514 			 */
5515 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5516 			stcb->freed_by_sorcv_sincelast = 0;
5517 		}
5518 	}
5519 	if (stcb &&
5520 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5521 	    control->do_not_ref_stcb == 0) {
5522 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5523 	}
5524 	/* First lets get off the sinfo and sockaddr info */
5525 	if ((sinfo) && filling_sinfo) {
5526 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5527 		nxt = TAILQ_NEXT(control, next);
5528 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5529 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5530 			struct sctp_extrcvinfo *s_extra;
5531 
5532 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5533 			if ((nxt) &&
5534 			    (nxt->length)) {
5535 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5536 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5537 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5538 				}
5539 				if (nxt->spec_flags & M_NOTIFICATION) {
5540 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5541 				}
5542 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5543 				s_extra->sreinfo_next_length = nxt->length;
5544 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5545 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5546 				if (nxt->tail_mbuf != NULL) {
5547 					if (nxt->end_added) {
5548 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5549 					}
5550 				}
5551 			} else {
5552 				/*
5553 				 * we explicitly 0 this, since the memcpy
5554 				 * got some other things beyond the older
5555 				 * sinfo_ that is on the control's structure
5556 				 * :-D
5557 				 */
5558 				nxt = NULL;
5559 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5560 				s_extra->sreinfo_next_aid = 0;
5561 				s_extra->sreinfo_next_length = 0;
5562 				s_extra->sreinfo_next_ppid = 0;
5563 				s_extra->sreinfo_next_stream = 0;
5564 			}
5565 		}
5566 		/*
5567 		 * update off the real current cum-ack, if we have an stcb.
5568 		 */
5569 		if ((control->do_not_ref_stcb == 0) && stcb)
5570 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5571 		/*
5572 		 * mask off the high bits, we keep the actual chunk bits in
5573 		 * there.
5574 		 */
5575 		sinfo->sinfo_flags &= 0x00ff;
5576 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5577 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5578 		}
5579 	}
5580 #ifdef SCTP_ASOCLOG_OF_TSNS
5581 	{
5582 		int index, newindex;
5583 		struct sctp_pcbtsn_rlog *entry;
5584 
5585 		do {
5586 			index = inp->readlog_index;
5587 			newindex = index + 1;
5588 			if (newindex >= SCTP_READ_LOG_SIZE) {
5589 				newindex = 0;
5590 			}
5591 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5592 		entry = &inp->readlog[index];
5593 		entry->vtag = control->sinfo_assoc_id;
5594 		entry->strm = control->sinfo_stream;
5595 		entry->seq = control->sinfo_ssn;
5596 		entry->sz = control->length;
5597 		entry->flgs = control->sinfo_flags;
5598 	}
5599 #endif
5600 	if (fromlen && from) {
5601 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
5602 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5603 #ifdef INET6
5604 		case AF_INET6:
5605 			((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5606 			break;
5607 #endif
5608 #ifdef INET
5609 		case AF_INET:
5610 			((struct sockaddr_in *)from)->sin_port = control->port_from;
5611 			break;
5612 #endif
5613 		default:
5614 			break;
5615 		}
5616 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5617 
5618 #if defined(INET) && defined(INET6)
5619 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5620 		    (from->sa_family == AF_INET) &&
5621 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5622 			struct sockaddr_in *sin;
5623 			struct sockaddr_in6 sin6;
5624 
5625 			sin = (struct sockaddr_in *)from;
5626 			bzero(&sin6, sizeof(sin6));
5627 			sin6.sin6_family = AF_INET6;
5628 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5629 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5630 			bcopy(&sin->sin_addr,
5631 			    &sin6.sin6_addr.s6_addr32[3],
5632 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5633 			sin6.sin6_port = sin->sin_port;
5634 			memcpy(from, &sin6, sizeof(struct sockaddr_in6));
5635 		}
5636 #endif
5637 #ifdef INET6
5638 		{
5639 			struct sockaddr_in6 lsa6, *from6;
5640 
5641 			from6 = (struct sockaddr_in6 *)from;
5642 			sctp_recover_scope_mac(from6, (&lsa6));
5643 		}
5644 #endif
5645 	}
5646 	/* now copy out what data we can */
5647 	if (mp == NULL) {
5648 		/* copy out each mbuf in the chain up to length */
5649 get_more_data:
5650 		m = control->data;
5651 		while (m) {
5652 			/* Move out all we can */
5653 			cp_len = (int)uio->uio_resid;
5654 			my_len = (int)SCTP_BUF_LEN(m);
5655 			if (cp_len > my_len) {
5656 				/* not enough in this buf */
5657 				cp_len = my_len;
5658 			}
5659 			if (hold_rlock) {
5660 				SCTP_INP_READ_UNLOCK(inp);
5661 				hold_rlock = 0;
5662 			}
5663 			if (cp_len > 0)
5664 				error = uiomove(mtod(m, char *), cp_len, uio);
5665 			/* re-read */
5666 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5667 				goto release;
5668 			}
5669 			if ((control->do_not_ref_stcb == 0) && stcb &&
5670 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5671 				no_rcv_needed = 1;
5672 			}
5673 			if (error) {
5674 				/* error we are out of here */
5675 				goto release;
5676 			}
5677 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5678 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5679 			    ((control->end_added == 0) ||
5680 			    (control->end_added &&
5681 			    (TAILQ_NEXT(control, next) == NULL)))
5682 			    ) {
5683 				SCTP_INP_READ_LOCK(inp);
5684 				hold_rlock = 1;
5685 			}
5686 			if (cp_len == SCTP_BUF_LEN(m)) {
5687 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5688 				    (control->end_added)) {
5689 					out_flags |= MSG_EOR;
5690 					if ((control->do_not_ref_stcb == 0) &&
5691 					    (control->stcb != NULL) &&
5692 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5693 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5694 				}
5695 				if (control->spec_flags & M_NOTIFICATION) {
5696 					out_flags |= MSG_NOTIFICATION;
5697 				}
5698 				/* we ate up the mbuf */
5699 				if (in_flags & MSG_PEEK) {
5700 					/* just looking */
5701 					m = SCTP_BUF_NEXT(m);
5702 					copied_so_far += cp_len;
5703 				} else {
5704 					/* dispose of the mbuf */
5705 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5706 						sctp_sblog(&so->so_rcv,
5707 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5708 					}
5709 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5710 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5711 						sctp_sblog(&so->so_rcv,
5712 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5713 					}
5714 					copied_so_far += cp_len;
5715 					freed_so_far += cp_len;
5716 					freed_so_far += MSIZE;
5717 					atomic_subtract_int(&control->length, cp_len);
5718 					control->data = sctp_m_free(m);
5719 					m = control->data;
5720 					/*
5721 					 * been through it all, must hold sb
5722 					 * lock ok to null tail
5723 					 */
5724 					if (control->data == NULL) {
5725 #ifdef INVARIANTS
5726 						if ((control->end_added == 0) ||
5727 						    (TAILQ_NEXT(control, next) == NULL)) {
5728 							/*
5729 							 * If the end is not
5730 							 * added, OR the
5731 							 * next is NOT null
5732 							 * we MUST have the
5733 							 * lock.
5734 							 */
5735 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5736 								panic("Hmm we don't own the lock?");
5737 							}
5738 						}
5739 #endif
5740 						control->tail_mbuf = NULL;
5741 #ifdef INVARIANTS
5742 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5743 							panic("end_added, nothing left and no MSG_EOR");
5744 						}
5745 #endif
5746 					}
5747 				}
5748 			} else {
5749 				/* Do we need to trim the mbuf? */
5750 				if (control->spec_flags & M_NOTIFICATION) {
5751 					out_flags |= MSG_NOTIFICATION;
5752 				}
5753 				if ((in_flags & MSG_PEEK) == 0) {
5754 					SCTP_BUF_RESV_UF(m, cp_len);
5755 					SCTP_BUF_LEN(m) -= cp_len;
5756 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5757 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5758 					}
5759 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5760 					if ((control->do_not_ref_stcb == 0) &&
5761 					    stcb) {
5762 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5763 					}
5764 					copied_so_far += cp_len;
5765 					freed_so_far += cp_len;
5766 					freed_so_far += MSIZE;
5767 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5768 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5769 						    SCTP_LOG_SBRESULT, 0);
5770 					}
5771 					atomic_subtract_int(&control->length, cp_len);
5772 				} else {
5773 					copied_so_far += cp_len;
5774 				}
5775 			}
5776 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5777 				break;
5778 			}
5779 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5780 			    (control->do_not_ref_stcb == 0) &&
5781 			    (freed_so_far >= rwnd_req)) {
5782 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5783 			}
5784 		}		/* end while(m) */
5785 		/*
5786 		 * At this point we have looked at it all and we either have
5787 		 * a MSG_EOR/or read all the user wants... <OR>
5788 		 * control->length == 0.
5789 		 */
5790 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5791 			/* we are done with this control */
5792 			if (control->length == 0) {
5793 				if (control->data) {
5794 #ifdef INVARIANTS
5795 					panic("control->data not null at read eor?");
5796 #else
5797 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5798 					sctp_m_freem(control->data);
5799 					control->data = NULL;
5800 #endif
5801 				}
5802 		done_with_control:
5803 				if (TAILQ_NEXT(control, next) == NULL) {
5804 					/*
5805 					 * If we don't have a next we need a
5806 					 * lock, if there is a next
5807 					 * interrupt is filling ahead of us
5808 					 * and we don't need a lock to
5809 					 * remove this guy (which is the
5810 					 * head of the queue).
5811 					 */
5812 					if (hold_rlock == 0) {
5813 						SCTP_INP_READ_LOCK(inp);
5814 						hold_rlock = 1;
5815 					}
5816 				}
5817 				TAILQ_REMOVE(&inp->read_queue, control, next);
5818 				/* Add back any hiddend data */
5819 				if (control->held_length) {
5820 					held_length = 0;
5821 					control->held_length = 0;
5822 					wakeup_read_socket = 1;
5823 				}
5824 				if (control->aux_data) {
5825 					sctp_m_free(control->aux_data);
5826 					control->aux_data = NULL;
5827 				}
5828 				no_rcv_needed = control->do_not_ref_stcb;
5829 				sctp_free_remote_addr(control->whoFrom);
5830 				control->data = NULL;
5831 				sctp_free_a_readq(stcb, control);
5832 				control = NULL;
5833 				if ((freed_so_far >= rwnd_req) &&
5834 				    (no_rcv_needed == 0))
5835 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5836 
5837 			} else {
5838 				/*
5839 				 * The user did not read all of this
5840 				 * message, turn off the returned MSG_EOR
5841 				 * since we are leaving more behind on the
5842 				 * control to read.
5843 				 */
5844 #ifdef INVARIANTS
5845 				if (control->end_added &&
5846 				    (control->data == NULL) &&
5847 				    (control->tail_mbuf == NULL)) {
5848 					panic("Gak, control->length is corrupt?");
5849 				}
5850 #endif
5851 				no_rcv_needed = control->do_not_ref_stcb;
5852 				out_flags &= ~MSG_EOR;
5853 			}
5854 		}
5855 		if (out_flags & MSG_EOR) {
5856 			goto release;
5857 		}
5858 		if ((uio->uio_resid == 0) ||
5859 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5860 		    ) {
5861 			goto release;
5862 		}
5863 		/*
5864 		 * If I hit here the receiver wants more and this message is
5865 		 * NOT done (pd-api). So two questions. Can we block? if not
5866 		 * we are done. Did the user NOT set MSG_WAITALL?
5867 		 */
5868 		if (block_allowed == 0) {
5869 			goto release;
5870 		}
5871 		/*
5872 		 * We need to wait for more data a few things: - We don't
5873 		 * sbunlock() so we don't get someone else reading. - We
5874 		 * must be sure to account for the case where what is added
5875 		 * is NOT to our control when we wakeup.
5876 		 */
5877 
5878 		/*
5879 		 * Do we need to tell the transport a rwnd update might be
5880 		 * needed before we go to sleep?
5881 		 */
5882 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5883 		    ((freed_so_far >= rwnd_req) &&
5884 		    (control->do_not_ref_stcb == 0) &&
5885 		    (no_rcv_needed == 0))) {
5886 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5887 		}
5888 wait_some_more:
5889 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5890 			goto release;
5891 		}
5892 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5893 			goto release;
5894 
5895 		if (hold_rlock == 1) {
5896 			SCTP_INP_READ_UNLOCK(inp);
5897 			hold_rlock = 0;
5898 		}
5899 		if (hold_sblock == 0) {
5900 			SOCKBUF_LOCK(&so->so_rcv);
5901 			hold_sblock = 1;
5902 		}
5903 		if ((copied_so_far) && (control->length == 0) &&
5904 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5905 			goto release;
5906 		}
5907 		if (so->so_rcv.sb_cc <= control->held_length) {
5908 			error = sbwait(&so->so_rcv);
5909 			if (error) {
5910 				goto release;
5911 			}
5912 			control->held_length = 0;
5913 		}
5914 		if (hold_sblock) {
5915 			SOCKBUF_UNLOCK(&so->so_rcv);
5916 			hold_sblock = 0;
5917 		}
5918 		if (control->length == 0) {
5919 			/* still nothing here */
5920 			if (control->end_added == 1) {
5921 				/* he aborted, or is done i.e.did a shutdown */
5922 				out_flags |= MSG_EOR;
5923 				if (control->pdapi_aborted) {
5924 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5925 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5926 
5927 					out_flags |= MSG_TRUNC;
5928 				} else {
5929 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5930 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5931 				}
5932 				goto done_with_control;
5933 			}
5934 			if (so->so_rcv.sb_cc > held_length) {
5935 				control->held_length = so->so_rcv.sb_cc;
5936 				held_length = 0;
5937 			}
5938 			goto wait_some_more;
5939 		} else if (control->data == NULL) {
5940 			/*
5941 			 * we must re-sync since data is probably being
5942 			 * added
5943 			 */
5944 			SCTP_INP_READ_LOCK(inp);
5945 			if ((control->length > 0) && (control->data == NULL)) {
5946 				/*
5947 				 * big trouble.. we have the lock and its
5948 				 * corrupt?
5949 				 */
5950 #ifdef INVARIANTS
5951 				panic("Impossible data==NULL length !=0");
5952 #endif
5953 				out_flags |= MSG_EOR;
5954 				out_flags |= MSG_TRUNC;
5955 				control->length = 0;
5956 				SCTP_INP_READ_UNLOCK(inp);
5957 				goto done_with_control;
5958 			}
5959 			SCTP_INP_READ_UNLOCK(inp);
5960 			/* We will fall around to get more data */
5961 		}
5962 		goto get_more_data;
5963 	} else {
5964 		/*-
5965 		 * Give caller back the mbuf chain,
5966 		 * store in uio_resid the length
5967 		 */
5968 		wakeup_read_socket = 0;
5969 		if ((control->end_added == 0) ||
5970 		    (TAILQ_NEXT(control, next) == NULL)) {
5971 			/* Need to get rlock */
5972 			if (hold_rlock == 0) {
5973 				SCTP_INP_READ_LOCK(inp);
5974 				hold_rlock = 1;
5975 			}
5976 		}
5977 		if (control->end_added) {
5978 			out_flags |= MSG_EOR;
5979 			if ((control->do_not_ref_stcb == 0) &&
5980 			    (control->stcb != NULL) &&
5981 			    ((control->spec_flags & M_NOTIFICATION) == 0))
5982 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5983 		}
5984 		if (control->spec_flags & M_NOTIFICATION) {
5985 			out_flags |= MSG_NOTIFICATION;
5986 		}
5987 		uio->uio_resid = control->length;
5988 		*mp = control->data;
5989 		m = control->data;
5990 		while (m) {
5991 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5992 				sctp_sblog(&so->so_rcv,
5993 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5994 			}
5995 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5996 			freed_so_far += SCTP_BUF_LEN(m);
5997 			freed_so_far += MSIZE;
5998 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5999 				sctp_sblog(&so->so_rcv,
6000 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6001 			}
6002 			m = SCTP_BUF_NEXT(m);
6003 		}
6004 		control->data = control->tail_mbuf = NULL;
6005 		control->length = 0;
6006 		if (out_flags & MSG_EOR) {
6007 			/* Done with this control */
6008 			goto done_with_control;
6009 		}
6010 	}
6011 release:
6012 	if (hold_rlock == 1) {
6013 		SCTP_INP_READ_UNLOCK(inp);
6014 		hold_rlock = 0;
6015 	}
6016 	if (hold_sblock == 1) {
6017 		SOCKBUF_UNLOCK(&so->so_rcv);
6018 		hold_sblock = 0;
6019 	}
6020 	sbunlock(&so->so_rcv);
6021 	sockbuf_lock = 0;
6022 
6023 release_unlocked:
6024 	if (hold_sblock) {
6025 		SOCKBUF_UNLOCK(&so->so_rcv);
6026 		hold_sblock = 0;
6027 	}
6028 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6029 		if ((freed_so_far >= rwnd_req) &&
6030 		    (control && (control->do_not_ref_stcb == 0)) &&
6031 		    (no_rcv_needed == 0))
6032 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6033 	}
6034 out:
6035 	if (msg_flags) {
6036 		*msg_flags = out_flags;
6037 	}
6038 	if (((out_flags & MSG_EOR) == 0) &&
6039 	    ((in_flags & MSG_PEEK) == 0) &&
6040 	    (sinfo) &&
6041 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6042 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6043 		struct sctp_extrcvinfo *s_extra;
6044 
6045 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6046 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6047 	}
6048 	if (hold_rlock == 1) {
6049 		SCTP_INP_READ_UNLOCK(inp);
6050 	}
6051 	if (hold_sblock) {
6052 		SOCKBUF_UNLOCK(&so->so_rcv);
6053 	}
6054 	if (sockbuf_lock) {
6055 		sbunlock(&so->so_rcv);
6056 	}
6057 	if (freecnt_applied) {
6058 		/*
6059 		 * The lock on the socket buffer protects us so the free
6060 		 * code will stop. But since we used the socketbuf lock and
6061 		 * the sender uses the tcb_lock to increment, we need to use
6062 		 * the atomic add to the refcnt.
6063 		 */
6064 		if (stcb == NULL) {
6065 #ifdef INVARIANTS
6066 			panic("stcb for refcnt has gone NULL?");
6067 			goto stage_left;
6068 #else
6069 			goto stage_left;
6070 #endif
6071 		}
6072 		atomic_add_int(&stcb->asoc.refcnt, -1);
6073 		/* Save the value back for next time */
6074 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6075 	}
6076 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6077 		if (stcb) {
6078 			sctp_misc_ints(SCTP_SORECV_DONE,
6079 			    freed_so_far,
6080 			    ((uio) ? (slen - uio->uio_resid) : slen),
6081 			    stcb->asoc.my_rwnd,
6082 			    so->so_rcv.sb_cc);
6083 		} else {
6084 			sctp_misc_ints(SCTP_SORECV_DONE,
6085 			    freed_so_far,
6086 			    ((uio) ? (slen - uio->uio_resid) : slen),
6087 			    0,
6088 			    so->so_rcv.sb_cc);
6089 		}
6090 	}
6091 stage_left:
6092 	if (wakeup_read_socket) {
6093 		sctp_sorwakeup(inp, so);
6094 	}
6095 	return (error);
6096 }
6097 
6098 
6099 #ifdef SCTP_MBUF_LOGGING
6100 struct mbuf *
6101 sctp_m_free(struct mbuf *m)
6102 {
6103 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6104 		if (SCTP_BUF_IS_EXTENDED(m)) {
6105 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6106 		}
6107 	}
6108 	return (m_free(m));
6109 }
6110 
6111 void
6112 sctp_m_freem(struct mbuf *mb)
6113 {
6114 	while (mb != NULL)
6115 		mb = sctp_m_free(mb);
6116 }
6117 
6118 #endif
6119 
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 *
	 * This queues a SCTP_SET_PRIM_ADDR work item on the global address
	 * work queue and kicks the ADDR_WQ timer; the iterator performs
	 * the actual per-association work asynchronously.
	 * Returns 0 on success, EADDRNOTAVAIL if the address is not a
	 * local address in the given VRF, or ENOMEM.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* Hold a reference on the ifa while it sits on the work queue. */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	/* Wake the address work-queue timer so the item gets processed. */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6166 
6167 
6168 int
6169 sctp_soreceive(struct socket *so,
6170     struct sockaddr **psa,
6171     struct uio *uio,
6172     struct mbuf **mp0,
6173     struct mbuf **controlp,
6174     int *flagsp)
6175 {
6176 	int error, fromlen;
6177 	uint8_t sockbuf[256];
6178 	struct sockaddr *from;
6179 	struct sctp_extrcvinfo sinfo;
6180 	int filling_sinfo = 1;
6181 	struct sctp_inpcb *inp;
6182 
6183 	inp = (struct sctp_inpcb *)so->so_pcb;
6184 	/* pickup the assoc we are reading from */
6185 	if (inp == NULL) {
6186 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6187 		return (EINVAL);
6188 	}
6189 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6190 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6191 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6192 	    (controlp == NULL)) {
6193 		/* user does not want the sndrcv ctl */
6194 		filling_sinfo = 0;
6195 	}
6196 	if (psa) {
6197 		from = (struct sockaddr *)sockbuf;
6198 		fromlen = sizeof(sockbuf);
6199 		from->sa_len = 0;
6200 	} else {
6201 		from = NULL;
6202 		fromlen = 0;
6203 	}
6204 
6205 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6206 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6207 	if ((controlp) && (filling_sinfo)) {
6208 		/* copy back the sinfo in a CMSG format */
6209 		if (filling_sinfo)
6210 			*controlp = sctp_build_ctl_nchunk(inp,
6211 			    (struct sctp_sndrcvinfo *)&sinfo);
6212 		else
6213 			*controlp = NULL;
6214 	}
6215 	if (psa) {
6216 		/* copy back the address info */
6217 		if (from && from->sa_len) {
6218 			*psa = sodupsockaddr(from, M_NOWAIT);
6219 		} else {
6220 			*psa = NULL;
6221 		}
6222 	}
6223 	return (error);
6224 }
6225 
6226 
6227 
6228 
6229 
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	/*
	 * Add a packed array of totaddr remote addresses (as supplied to
	 * sctp_connectx) to the association.  Returns the number of
	 * addresses actually added.  On any invalid address or
	 * sctp_add_remote_addr() failure, the association is freed,
	 * *error is set (EINVAL/ENOBUFS) and we bail out.
	 *
	 * NOTE(review): on an unrecognized sa_family, incr keeps its
	 * previous value (0 on the first entry), so sa may not advance --
	 * presumably the caller has already validated the families via
	 * sctp_connectx_helper_find(); confirm before relying on this.
	 */
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast peers */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast peers */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			break;
		}
		/* advance to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6305 
6306 struct sctp_tcb *
6307 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6308     int *totaddr, int *num_v4, int *num_v6, int *error,
6309     int limit, int *bad_addr)
6310 {
6311 	struct sockaddr *sa;
6312 	struct sctp_tcb *stcb = NULL;
6313 	size_t incr, at, i;
6314 
6315 	at = incr = 0;
6316 	sa = addr;
6317 
6318 	*error = *num_v6 = *num_v4 = 0;
6319 	/* account and validate addresses */
6320 	for (i = 0; i < (size_t)*totaddr; i++) {
6321 		switch (sa->sa_family) {
6322 #ifdef INET
6323 		case AF_INET:
6324 			(*num_v4) += 1;
6325 			incr = sizeof(struct sockaddr_in);
6326 			if (sa->sa_len != incr) {
6327 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6328 				*error = EINVAL;
6329 				*bad_addr = 1;
6330 				return (NULL);
6331 			}
6332 			break;
6333 #endif
6334 #ifdef INET6
6335 		case AF_INET6:
6336 			{
6337 				struct sockaddr_in6 *sin6;
6338 
6339 				sin6 = (struct sockaddr_in6 *)sa;
6340 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6341 					/* Must be non-mapped for connectx */
6342 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6343 					*error = EINVAL;
6344 					*bad_addr = 1;
6345 					return (NULL);
6346 				}
6347 				(*num_v6) += 1;
6348 				incr = sizeof(struct sockaddr_in6);
6349 				if (sa->sa_len != incr) {
6350 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6351 					*error = EINVAL;
6352 					*bad_addr = 1;
6353 					return (NULL);
6354 				}
6355 				break;
6356 			}
6357 #endif
6358 		default:
6359 			*totaddr = i;
6360 			/* we are done */
6361 			break;
6362 		}
6363 		if (i == (size_t)*totaddr) {
6364 			break;
6365 		}
6366 		SCTP_INP_INCR_REF(inp);
6367 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6368 		if (stcb != NULL) {
6369 			/* Already have or am bring up an association */
6370 			return (stcb);
6371 		} else {
6372 			SCTP_INP_DECR_REF(inp);
6373 		}
6374 		if ((at + incr) > (size_t)limit) {
6375 			*totaddr = i;
6376 			break;
6377 		}
6378 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6379 	}
6380 	return ((struct sctp_tcb *)NULL);
6381 }
6382 
6383 /*
6384  * sctp_bindx(ADD) for one address.
6385  * assumes all arguments are valid/checked by caller.
6386  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	/*
	 * Handle sctp_bindx(SCTP_BINDX_ADD_ADDR) for a single address.
	 * Validates the address against the socket's family/V6ONLY
	 * settings, converts v4-mapped v6 addresses to plain v4, and
	 * either performs an initial bind (endpoint still unbound) or
	 * adds the address to an already-bound endpoint.  Errors are
	 * reported through *error; all arguments are assumed to have
	 * been validated by the caller.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* work with the embedded v4 address from here on */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* endpoint not yet bound: this add IS the initial bind */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* is some other endpoint already bound to this address? */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* clear the port before the address-management call */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6510 
6511 /*
6512  * sctp_bindx(DELETE) for one address.
6513  * assumes all arguments are valid/checked by caller.
6514  */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	/*
	 * Handle sctp_bindx(SCTP_BINDX_REM_ADDR) for a single address.
	 * Mirrors the validation in sctp_bindx_add_address() (family,
	 * sa_len, V6ONLY rules, v4-mapped conversion), then removes the
	 * address from the endpoint.  Errors are reported via *error;
	 * arguments are assumed pre-validated by the caller.
	 */
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* delete using the embedded v4 form of the address */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6595 
6596 /*
6597  * returns the valid local address count for an assoc, taking into account
6598  * all scoping rules
6599  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	/*
	 * Count the local addresses valid for this association, applying
	 * the association's loopback/private/link-local/site-local scope
	 * flags and the endpoint's v4/v6 legality.  Returns the count.
	 */
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	ipv4_addr_legal = ipv6_addr_legal = 0;
	/* v6 sockets may also carry v4 addresses unless V6ONLY is set */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				/* skip addresses this assoc may not use */
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case: only the endpoint's explicit
		 * address list is eligible
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6733 
6734 #if defined(SCTP_LOCAL_TRACE_BUF)
6735 
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	/*
	 * Record one entry in the global circular trace buffer.  A
	 * lock-free CAS loop reserves a slot index so concurrent callers
	 * each get a distinct slot without taking a lock.
	 */
	uint32_t saveindex, newindex;

	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* saveindex at the limit means we wrapped: write slot 0 */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6761 
6762 #endif
6763 /* XXX: Remove the #ifdef after tunneling over IPv6 works also on FreeBSD. */
6764 #ifdef INET
6765 /* We will need to add support
6766  * to bind the ports and such here
6767  * so we can do UDP tunneling. In
6768  * the mean-time, we return error
6769  */
6770 #include <netinet/udp.h>
6771 #include <netinet/udp_var.h>
6772 #include <sys/proc.h>
6773 #ifdef INET6
6774 #include <netinet6/sctp6_var.h>
6775 #endif
6776 
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
{
	/*
	 * Input hook for the kernel UDP tunneling socket: strip the UDP
	 * header from a received datagram and hand the inner SCTP packet
	 * (together with the UDP source port) to the SCTP input path.
	 * Consumes the mbuf on every path.
	 */
	struct ip *iph;
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;	/* passed through without byte-order conversion */
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_DONTWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/*
		 * shrink IP length by the removed UDP header; assumes
		 * ip_len is in host byte order at this point -- TODO
		 * confirm for this input path
		 */
		iph->ip_len -= sizeof(struct udphdr);
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		/* Not yet supported. */
		goto out;
		break;

#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6843 
6844 void
6845 sctp_over_udp_stop(void)
6846 {
6847 	struct socket *sop;
6848 
6849 	/*
6850 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6851 	 * for writting!
6852 	 */
6853 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6854 		/* Nothing to do */
6855 		return;
6856 	}
6857 	sop = SCTP_BASE_INFO(udp_tun_socket);
6858 	soclose(sop);
6859 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6860 }
6861 
int
sctp_over_udp_start(void)
{
	/*
	 * Create and bind the kernel UDP socket used for SCTP-over-UDP
	 * tunneling, and install sctp_recv_udp_tunneled_packet() as its
	 * input hook.  Returns 0 on success or an errno (EINVAL if no
	 * tunneling port is configured, EALREADY if already running,
	 * or a socreate/sobind/hook-installation error).
	 */
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up, we can't get the port */
exit_stage_left:
		/* sctp_over_udp_stop() closes and clears udp_tun_socket */
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_udp_tunneled_packet().
	 */
	return (0);
}
6915 
6916 #endif
6917