xref: /freebsd/sys/netinet/sctputil.c (revision c243e4902be8df1e643c76b5f18b68bb77cc5268)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_bsd_addr.h>
51 
52 
53 #ifndef KTR_SCTP
54 #define KTR_SCTP KTR_SUBSYS
55 #endif
56 
57 extern struct sctp_cc_functions sctp_cc_functions[];
58 extern struct sctp_ss_functions sctp_ss_functions[];
59 
60 void
61 sctp_sblog(struct sockbuf *sb,
62     struct sctp_tcb *stcb, int from, int incr)
63 {
64 	struct sctp_cwnd_log sctp_clog;
65 
66 	sctp_clog.x.sb.stcb = stcb;
67 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
68 	if (stcb)
69 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
70 	else
71 		sctp_clog.x.sb.stcb_sbcc = 0;
72 	sctp_clog.x.sb.incr = incr;
73 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
74 	    SCTP_LOG_EVENT_SB,
75 	    from,
76 	    sctp_clog.x.misc.log1,
77 	    sctp_clog.x.misc.log2,
78 	    sctp_clog.x.misc.log3,
79 	    sctp_clog.x.misc.log4);
80 }
81 
82 void
83 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
84 {
85 	struct sctp_cwnd_log sctp_clog;
86 
87 	sctp_clog.x.close.inp = (void *)inp;
88 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
89 	if (stcb) {
90 		sctp_clog.x.close.stcb = (void *)stcb;
91 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
92 	} else {
93 		sctp_clog.x.close.stcb = 0;
94 		sctp_clog.x.close.state = 0;
95 	}
96 	sctp_clog.x.close.loc = loc;
97 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
98 	    SCTP_LOG_EVENT_CLOSE,
99 	    0,
100 	    sctp_clog.x.misc.log1,
101 	    sctp_clog.x.misc.log2,
102 	    sctp_clog.x.misc.log3,
103 	    sctp_clog.x.misc.log4);
104 }
105 
106 
107 void
108 rto_logging(struct sctp_nets *net, int from)
109 {
110 	struct sctp_cwnd_log sctp_clog;
111 
112 	memset(&sctp_clog, 0, sizeof(sctp_clog));
113 	sctp_clog.x.rto.net = (void *)net;
114 	sctp_clog.x.rto.rtt = net->rtt / 1000;
115 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
116 	    SCTP_LOG_EVENT_RTT,
117 	    from,
118 	    sctp_clog.x.misc.log1,
119 	    sctp_clog.x.misc.log2,
120 	    sctp_clog.x.misc.log3,
121 	    sctp_clog.x.misc.log4);
122 }
123 
124 void
125 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
126 {
127 	struct sctp_cwnd_log sctp_clog;
128 
129 	sctp_clog.x.strlog.stcb = stcb;
130 	sctp_clog.x.strlog.n_tsn = tsn;
131 	sctp_clog.x.strlog.n_sseq = sseq;
132 	sctp_clog.x.strlog.e_tsn = 0;
133 	sctp_clog.x.strlog.e_sseq = 0;
134 	sctp_clog.x.strlog.strm = stream;
135 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
136 	    SCTP_LOG_EVENT_STRM,
137 	    from,
138 	    sctp_clog.x.misc.log1,
139 	    sctp_clog.x.misc.log2,
140 	    sctp_clog.x.misc.log3,
141 	    sctp_clog.x.misc.log4);
142 }
143 
144 void
145 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
146 {
147 	struct sctp_cwnd_log sctp_clog;
148 
149 	sctp_clog.x.nagle.stcb = (void *)stcb;
150 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
151 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
152 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
153 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
154 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
155 	    SCTP_LOG_EVENT_NAGLE,
156 	    action,
157 	    sctp_clog.x.misc.log1,
158 	    sctp_clog.x.misc.log2,
159 	    sctp_clog.x.misc.log3,
160 	    sctp_clog.x.misc.log4);
161 }
162 
163 void
164 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
165 {
166 	struct sctp_cwnd_log sctp_clog;
167 
168 	sctp_clog.x.sack.cumack = cumack;
169 	sctp_clog.x.sack.oldcumack = old_cumack;
170 	sctp_clog.x.sack.tsn = tsn;
171 	sctp_clog.x.sack.numGaps = gaps;
172 	sctp_clog.x.sack.numDups = dups;
173 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
174 	    SCTP_LOG_EVENT_SACK,
175 	    from,
176 	    sctp_clog.x.misc.log1,
177 	    sctp_clog.x.misc.log2,
178 	    sctp_clog.x.misc.log3,
179 	    sctp_clog.x.misc.log4);
180 }
181 
182 void
183 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
184 {
185 	struct sctp_cwnd_log sctp_clog;
186 
187 	memset(&sctp_clog, 0, sizeof(sctp_clog));
188 	sctp_clog.x.map.base = map;
189 	sctp_clog.x.map.cum = cum;
190 	sctp_clog.x.map.high = high;
191 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
192 	    SCTP_LOG_EVENT_MAP,
193 	    from,
194 	    sctp_clog.x.misc.log1,
195 	    sctp_clog.x.misc.log2,
196 	    sctp_clog.x.misc.log3,
197 	    sctp_clog.x.misc.log4);
198 }
199 
200 void
201 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
202     int from)
203 {
204 	struct sctp_cwnd_log sctp_clog;
205 
206 	memset(&sctp_clog, 0, sizeof(sctp_clog));
207 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
208 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
209 	sctp_clog.x.fr.tsn = tsn;
210 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
211 	    SCTP_LOG_EVENT_FR,
212 	    from,
213 	    sctp_clog.x.misc.log1,
214 	    sctp_clog.x.misc.log2,
215 	    sctp_clog.x.misc.log3,
216 	    sctp_clog.x.misc.log4);
217 }
218 
219 void
220 sctp_log_mb(struct mbuf *m, int from)
221 {
222 	struct sctp_cwnd_log sctp_clog;
223 
224 	sctp_clog.x.mb.mp = m;
225 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
226 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
227 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
228 	if (SCTP_BUF_IS_EXTENDED(m)) {
229 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
230 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
231 	} else {
232 		sctp_clog.x.mb.ext = 0;
233 		sctp_clog.x.mb.refcnt = 0;
234 	}
235 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
236 	    SCTP_LOG_EVENT_MBUF,
237 	    from,
238 	    sctp_clog.x.misc.log1,
239 	    sctp_clog.x.misc.log2,
240 	    sctp_clog.x.misc.log3,
241 	    sctp_clog.x.misc.log4);
242 }
243 
244 void
245 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
246     int from)
247 {
248 	struct sctp_cwnd_log sctp_clog;
249 
250 	if (control == NULL) {
251 		SCTP_PRINTF("Gak log of NULL?\n");
252 		return;
253 	}
254 	sctp_clog.x.strlog.stcb = control->stcb;
255 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
256 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
257 	sctp_clog.x.strlog.strm = control->sinfo_stream;
258 	if (poschk != NULL) {
259 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
260 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
261 	} else {
262 		sctp_clog.x.strlog.e_tsn = 0;
263 		sctp_clog.x.strlog.e_sseq = 0;
264 	}
265 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
266 	    SCTP_LOG_EVENT_STRM,
267 	    from,
268 	    sctp_clog.x.misc.log1,
269 	    sctp_clog.x.misc.log2,
270 	    sctp_clog.x.misc.log3,
271 	    sctp_clog.x.misc.log4);
272 }
273 
274 void
275 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
276 {
277 	struct sctp_cwnd_log sctp_clog;
278 
279 	sctp_clog.x.cwnd.net = net;
280 	if (stcb->asoc.send_queue_cnt > 255)
281 		sctp_clog.x.cwnd.cnt_in_send = 255;
282 	else
283 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
284 	if (stcb->asoc.stream_queue_cnt > 255)
285 		sctp_clog.x.cwnd.cnt_in_str = 255;
286 	else
287 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
288 
289 	if (net) {
290 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
291 		sctp_clog.x.cwnd.inflight = net->flight_size;
292 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
293 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
294 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
295 	}
296 	if (SCTP_CWNDLOG_PRESEND == from) {
297 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
298 	}
299 	sctp_clog.x.cwnd.cwnd_augment = augment;
300 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
301 	    SCTP_LOG_EVENT_CWND,
302 	    from,
303 	    sctp_clog.x.misc.log1,
304 	    sctp_clog.x.misc.log2,
305 	    sctp_clog.x.misc.log3,
306 	    sctp_clog.x.misc.log4);
307 }
308 
309 void
310 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
311 {
312 	struct sctp_cwnd_log sctp_clog;
313 
314 	memset(&sctp_clog, 0, sizeof(sctp_clog));
315 	if (inp) {
316 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
317 
318 	} else {
319 		sctp_clog.x.lock.sock = (void *)NULL;
320 	}
321 	sctp_clog.x.lock.inp = (void *)inp;
322 	if (stcb) {
323 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
324 	} else {
325 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
326 	}
327 	if (inp) {
328 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
329 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
330 	} else {
331 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
332 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
333 	}
334 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
335 	if (inp && (inp->sctp_socket)) {
336 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
337 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
338 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
339 	} else {
340 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
341 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
342 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
343 	}
344 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
345 	    SCTP_LOG_LOCK_EVENT,
346 	    from,
347 	    sctp_clog.x.misc.log1,
348 	    sctp_clog.x.misc.log2,
349 	    sctp_clog.x.misc.log3,
350 	    sctp_clog.x.misc.log4);
351 }
352 
353 void
354 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
355 {
356 	struct sctp_cwnd_log sctp_clog;
357 
358 	memset(&sctp_clog, 0, sizeof(sctp_clog));
359 	sctp_clog.x.cwnd.net = net;
360 	sctp_clog.x.cwnd.cwnd_new_value = error;
361 	sctp_clog.x.cwnd.inflight = net->flight_size;
362 	sctp_clog.x.cwnd.cwnd_augment = burst;
363 	if (stcb->asoc.send_queue_cnt > 255)
364 		sctp_clog.x.cwnd.cnt_in_send = 255;
365 	else
366 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
367 	if (stcb->asoc.stream_queue_cnt > 255)
368 		sctp_clog.x.cwnd.cnt_in_str = 255;
369 	else
370 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
371 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
372 	    SCTP_LOG_EVENT_MAXBURST,
373 	    from,
374 	    sctp_clog.x.misc.log1,
375 	    sctp_clog.x.misc.log2,
376 	    sctp_clog.x.misc.log3,
377 	    sctp_clog.x.misc.log4);
378 }
379 
380 void
381 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
382 {
383 	struct sctp_cwnd_log sctp_clog;
384 
385 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
386 	sctp_clog.x.rwnd.send_size = snd_size;
387 	sctp_clog.x.rwnd.overhead = overhead;
388 	sctp_clog.x.rwnd.new_rwnd = 0;
389 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
390 	    SCTP_LOG_EVENT_RWND,
391 	    from,
392 	    sctp_clog.x.misc.log1,
393 	    sctp_clog.x.misc.log2,
394 	    sctp_clog.x.misc.log3,
395 	    sctp_clog.x.misc.log4);
396 }
397 
398 void
399 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
400 {
401 	struct sctp_cwnd_log sctp_clog;
402 
403 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
404 	sctp_clog.x.rwnd.send_size = flight_size;
405 	sctp_clog.x.rwnd.overhead = overhead;
406 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
407 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
408 	    SCTP_LOG_EVENT_RWND,
409 	    from,
410 	    sctp_clog.x.misc.log1,
411 	    sctp_clog.x.misc.log2,
412 	    sctp_clog.x.misc.log3,
413 	    sctp_clog.x.misc.log4);
414 }
415 
416 void
417 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
418 {
419 	struct sctp_cwnd_log sctp_clog;
420 
421 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
422 	sctp_clog.x.mbcnt.size_change = book;
423 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
424 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
425 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
426 	    SCTP_LOG_EVENT_MBCNT,
427 	    from,
428 	    sctp_clog.x.misc.log1,
429 	    sctp_clog.x.misc.log2,
430 	    sctp_clog.x.misc.log3,
431 	    sctp_clog.x.misc.log4);
432 }
433 
/*
 * Log four caller-supplied 32-bit values verbatim as a
 * SCTP_LOG_MISC_EVENT trace record; 'from' identifies the call site.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
442 
443 void
444 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
445 {
446 	struct sctp_cwnd_log sctp_clog;
447 
448 	sctp_clog.x.wake.stcb = (void *)stcb;
449 	sctp_clog.x.wake.wake_cnt = wake_cnt;
450 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
451 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
452 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
453 
454 	if (stcb->asoc.stream_queue_cnt < 0xff)
455 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
456 	else
457 		sctp_clog.x.wake.stream_qcnt = 0xff;
458 
459 	if (stcb->asoc.chunks_on_out_queue < 0xff)
460 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
461 	else
462 		sctp_clog.x.wake.chunks_on_oque = 0xff;
463 
464 	sctp_clog.x.wake.sctpflags = 0;
465 	/* set in the defered mode stuff */
466 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
467 		sctp_clog.x.wake.sctpflags |= 1;
468 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
469 		sctp_clog.x.wake.sctpflags |= 2;
470 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
471 		sctp_clog.x.wake.sctpflags |= 4;
472 	/* what about the sb */
473 	if (stcb->sctp_socket) {
474 		struct socket *so = stcb->sctp_socket;
475 
476 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
477 	} else {
478 		sctp_clog.x.wake.sbflags = 0xff;
479 	}
480 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
481 	    SCTP_LOG_EVENT_WAKE,
482 	    from,
483 	    sctp_clog.x.misc.log1,
484 	    sctp_clog.x.misc.log2,
485 	    sctp_clog.x.misc.log3,
486 	    sctp_clog.x.misc.log4);
487 }
488 
489 void
490 sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
491 {
492 	struct sctp_cwnd_log sctp_clog;
493 
494 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
495 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
496 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
497 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
498 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
499 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
500 	sctp_clog.x.blk.sndlen = sendlen;
501 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
502 	    SCTP_LOG_EVENT_BLOCK,
503 	    from,
504 	    sctp_clog.x.misc.log1,
505 	    sctp_clog.x.misc.log2,
506 	    sctp_clog.x.misc.log3,
507 	    sctp_clog.x.misc.log4);
508 }
509 
/*
 * Stub for the stat-log getsockopt path.  The log records above are
 * emitted through KTR tracing (SCTP_CTR6) rather than stored in a
 * kernel buffer, so there is nothing to copy out; always returns 0
 * without touching optval/optsize.
 */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
516 
517 #ifdef SCTP_AUDITING_ENABLED
518 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
519 static int sctp_audit_indx = 0;
520 
521 static
522 void
523 sctp_print_audit_report(void)
524 {
525 	int i;
526 	int cnt;
527 
528 	cnt = 0;
529 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
530 		if ((sctp_audit_data[i][0] == 0xe0) &&
531 		    (sctp_audit_data[i][1] == 0x01)) {
532 			cnt = 0;
533 			SCTP_PRINTF("\n");
534 		} else if (sctp_audit_data[i][0] == 0xf0) {
535 			cnt = 0;
536 			SCTP_PRINTF("\n");
537 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
538 		    (sctp_audit_data[i][1] == 0x01)) {
539 			SCTP_PRINTF("\n");
540 			cnt = 0;
541 		}
542 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
543 		    (uint32_t) sctp_audit_data[i][1]);
544 		cnt++;
545 		if ((cnt % 14) == 0)
546 			SCTP_PRINTF("\n");
547 	}
548 	for (i = 0; i < sctp_audit_indx; i++) {
549 		if ((sctp_audit_data[i][0] == 0xe0) &&
550 		    (sctp_audit_data[i][1] == 0x01)) {
551 			cnt = 0;
552 			SCTP_PRINTF("\n");
553 		} else if (sctp_audit_data[i][0] == 0xf0) {
554 			cnt = 0;
555 			SCTP_PRINTF("\n");
556 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
557 		    (sctp_audit_data[i][1] == 0x01)) {
558 			SCTP_PRINTF("\n");
559 			cnt = 0;
560 		}
561 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
562 		    (uint32_t) sctp_audit_data[i][1]);
563 		cnt++;
564 		if ((cnt % 14) == 0)
565 			SCTP_PRINTF("\n");
566 	}
567 	SCTP_PRINTF("\n");
568 }
569 
/*
 * Audit the association's retransmission and flight-size bookkeeping
 * against the actual contents of the sent queue and the per-destination
 * flight sizes.  Each step appends a code pair to the circular
 * sctp_audit_data trail; on any mismatch the counter is corrected to
 * the recomputed value and a report is printed.  'from' tags the call
 * site; 'net' is currently unused.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA <from>: entry marker for this audit pass. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF 0x01: no endpoint, nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF 0x02: no association, nothing to audit. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1 <cnt>: record the claimed retransmit count before checking. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Walk the sent queue, recomputing the number of chunks marked
	 * for retransmit and the bytes/chunks still counted in flight
	 * (chunks with sent < SCTP_DATAGRAM_RESEND).
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF 0xA1: retransmit count mismatch; patch and report. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		/* 0xA2 <cnt>: the corrected retransmit count. */
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF 0xA2: total flight bytes mismatch; patch and report. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF 0xA5: flight chunk count mismatch; patch and report. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/*
	 * Cross-check: the sum of per-destination flight sizes must also
	 * equal the (possibly just corrected) association total.
	 */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF 0xA3: per-net flight sum mismatch. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			/* Recompute this destination's flight from the queue. */
			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
699 
700 void
701 sctp_audit_log(uint8_t ev, uint8_t fd)
702 {
703 
704 	sctp_audit_data[sctp_audit_indx][0] = ev;
705 	sctp_audit_data[sctp_audit_indx][1] = fd;
706 	sctp_audit_indx++;
707 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
708 		sctp_audit_indx = 0;
709 	}
710 }
711 
712 #endif
713 
714 /*
715  * sctp_stop_timers_for_shutdown() should be called
716  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
717  * state to make sure that all timers are stopped.
718  */
719 void
720 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
721 {
722 	struct sctp_association *asoc;
723 	struct sctp_nets *net;
724 
725 	asoc = &stcb->asoc;
726 
727 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
728 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
729 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
730 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
731 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
732 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
733 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
734 		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
735 	}
736 }
737 
738 /*
739  * a list of sizes based on typical mtu's, used only if next hop size not
740  * returned.
741  */
/*
 * Ascending table of typical link MTUs, used only when the next hop
 * does not report a size.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest table MTU strictly smaller than val, or val
 * itself when val does not exceed the smallest entry.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t idx;

	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	/* Find the first entry >= val; its predecessor is the answer. */
	for (idx = 1; idx < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); idx++) {
		if (val <= sctp_mtu_sizes[idx]) {
			break;
		}
	}
	return (sctp_mtu_sizes[idx - 1]);
}

/*
 * Return the smallest table MTU strictly larger than val, or val
 * itself when no entry is larger.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	uint32_t idx;

	for (idx = 0; idx < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); idx++) {
		if (sctp_mtu_sizes[idx] > val) {
			return (sctp_mtu_sizes[idx]);
		}
	}
	return (val);
}
800 
/*
 * Refill the endpoint's random_store by HMAC'ing its random_numbers
 * seed together with a monotonically increasing counter, then reset
 * the store cursor so sctp_select_initial_TSN() starts handing out
 * the fresh words.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* Bump the counter so the next refill hashes different input. */
	m->random_counter++;
}
819 
/*
 * Hand out the next 32-bit pseudo-random word from the endpoint's
 * random_store, refilling the store when it wraps.  The cursor is
 * advanced with a lock-free compare-and-swap so concurrent callers
 * each reserve a distinct 4-byte slot.  When initial_sequence_debug
 * is set, deterministic increasing values are returned instead (for
 * debugging).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* Wrap before the final partial word of the signature buffer. */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* Reserve [store_at, store_at+4); retry if another thread won. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
857 
858 uint32_t
859 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
860 {
861 	uint32_t x;
862 	struct timeval now;
863 
864 	if (check) {
865 		(void)SCTP_GETTIME_TIMEVAL(&now);
866 	}
867 	for (;;) {
868 		x = sctp_select_initial_TSN(&inp->sctp_ep);
869 		if (x == 0) {
870 			/* we never use 0 */
871 			continue;
872 		}
873 		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
874 			break;
875 		}
876 	}
877 	return (x);
878 }
879 
880 int
881 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
882     uint32_t override_tag, uint32_t vrf_id)
883 {
884 	struct sctp_association *asoc;
885 
886 	/*
887 	 * Anything set to zero is taken care of by the allocation routine's
888 	 * bzero
889 	 */
890 
891 	/*
892 	 * Up front select what scoping to apply on addresses I tell my peer
893 	 * Not sure what to do with these right now, we will need to come up
894 	 * with a way to set them. We may need to pass them through from the
895 	 * caller in the sctp_aloc_assoc() function.
896 	 */
897 	int i;
898 
899 	asoc = &stcb->asoc;
900 	/* init all variables to a known value. */
901 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
902 	asoc->max_burst = m->sctp_ep.max_burst;
903 	asoc->fr_max_burst = m->sctp_ep.fr_max_burst;
904 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
905 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
906 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
907 	asoc->ecn_allowed = m->sctp_ecn_enable;
908 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
909 	asoc->sctp_cmt_pf = (uint8_t) 0;
910 	asoc->sctp_frag_point = m->sctp_frag_point;
911 	asoc->sctp_features = m->sctp_features;
912 	asoc->default_dscp = m->sctp_ep.default_dscp;
913 #ifdef INET6
914 	if (m->sctp_ep.default_flowlabel) {
915 		asoc->default_flowlabel = m->sctp_ep.default_flowlabel;
916 	} else {
917 		if (m->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
918 			asoc->default_flowlabel = sctp_select_initial_TSN(&m->sctp_ep);
919 			asoc->default_flowlabel &= 0x000fffff;
920 			asoc->default_flowlabel |= 0x80000000;
921 		} else {
922 			asoc->default_flowlabel = 0;
923 		}
924 	}
925 #endif
926 	asoc->sb_send_resv = 0;
927 	if (override_tag) {
928 		asoc->my_vtag = override_tag;
929 	} else {
930 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
931 	}
932 	/* Get the nonce tags */
933 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
934 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
935 	asoc->vrf_id = vrf_id;
936 
937 #ifdef SCTP_ASOCLOG_OF_TSNS
938 	asoc->tsn_in_at = 0;
939 	asoc->tsn_out_at = 0;
940 	asoc->tsn_in_wrapped = 0;
941 	asoc->tsn_out_wrapped = 0;
942 	asoc->cumack_log_at = 0;
943 	asoc->cumack_log_atsnt = 0;
944 #endif
945 #ifdef SCTP_FS_SPEC_LOG
946 	asoc->fs_index = 0;
947 #endif
948 	asoc->refcnt = 0;
949 	asoc->assoc_up_sent = 0;
950 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
951 	    sctp_select_initial_TSN(&m->sctp_ep);
952 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
953 	/* we are optimisitic here */
954 	asoc->peer_supports_pktdrop = 1;
955 	asoc->peer_supports_nat = 0;
956 	asoc->sent_queue_retran_cnt = 0;
957 
958 	/* for CMT */
959 	asoc->last_net_cmt_send_started = NULL;
960 
961 	/* This will need to be adjusted */
962 	asoc->last_acked_seq = asoc->init_seq_number - 1;
963 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
964 	asoc->asconf_seq_in = asoc->last_acked_seq;
965 
966 	/* here we are different, we hold the next one we expect */
967 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
968 
969 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
970 	asoc->initial_rto = m->sctp_ep.initial_rto;
971 
972 	asoc->max_init_times = m->sctp_ep.max_init_times;
973 	asoc->max_send_times = m->sctp_ep.max_send_times;
974 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
975 	asoc->def_net_pf_threshold = m->sctp_ep.def_net_pf_threshold;
976 	asoc->free_chunk_cnt = 0;
977 
978 	asoc->iam_blocking = 0;
979 	asoc->context = m->sctp_context;
980 	asoc->local_strreset_support = m->local_strreset_support;
981 	asoc->def_send = m->def_send;
982 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
983 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
984 	asoc->pr_sctp_cnt = 0;
985 	asoc->total_output_queue_size = 0;
986 
987 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
988 		struct in6pcb *inp6;
989 
990 		/* Its a V6 socket */
991 		inp6 = (struct in6pcb *)m;
992 		asoc->ipv6_addr_legal = 1;
993 		/* Now look at the binding flag to see if V4 will be legal */
994 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
995 			asoc->ipv4_addr_legal = 1;
996 		} else {
997 			/* V4 addresses are NOT legal on the association */
998 			asoc->ipv4_addr_legal = 0;
999 		}
1000 	} else {
1001 		/* Its a V4 socket, no - V6 */
1002 		asoc->ipv4_addr_legal = 1;
1003 		asoc->ipv6_addr_legal = 0;
1004 	}
1005 
1006 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1007 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1008 
1009 	asoc->smallest_mtu = m->sctp_frag_point;
1010 	asoc->minrto = m->sctp_ep.sctp_minrto;
1011 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1012 
1013 	asoc->locked_on_sending = NULL;
1014 	asoc->stream_locked_on = 0;
1015 	asoc->ecn_echo_cnt_onq = 0;
1016 	asoc->stream_locked = 0;
1017 
1018 	asoc->send_sack = 1;
1019 
1020 	LIST_INIT(&asoc->sctp_restricted_addrs);
1021 
1022 	TAILQ_INIT(&asoc->nets);
1023 	TAILQ_INIT(&asoc->pending_reply_queue);
1024 	TAILQ_INIT(&asoc->asconf_ack_sent);
1025 	/* Setup to fill the hb random cache at first HB */
1026 	asoc->hb_random_idx = 4;
1027 
1028 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1029 
1030 	stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
1031 	stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
1032 
1033 	stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
1034 	stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
1035 
1036 	/*
1037 	 * Now the stream parameters, here we allocate space for all streams
1038 	 * that we request by default.
1039 	 */
1040 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1041 	    m->sctp_ep.pre_open_stream_count;
1042 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1043 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1044 	    SCTP_M_STRMO);
1045 	if (asoc->strmout == NULL) {
1046 		/* big trouble no memory */
1047 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1048 		return (ENOMEM);
1049 	}
1050 	for (i = 0; i < asoc->streamoutcnt; i++) {
1051 		/*
1052 		 * inbound side must be set to 0xffff, also NOTE when we get
1053 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1054 		 * count (streamoutcnt) but first check if we sent to any of
1055 		 * the upper streams that were dropped (if some were). Those
1056 		 * that were dropped must be notified to the upper layer as
1057 		 * failed to send.
1058 		 */
1059 		asoc->strmout[i].next_sequence_sent = 0x0;
1060 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1061 		asoc->strmout[i].stream_no = i;
1062 		asoc->strmout[i].last_msg_incomplete = 0;
1063 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
1064 	}
1065 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1066 
1067 	/* Now the mapping array */
1068 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1069 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1070 	    SCTP_M_MAP);
1071 	if (asoc->mapping_array == NULL) {
1072 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1073 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1074 		return (ENOMEM);
1075 	}
1076 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1077 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1078 	    SCTP_M_MAP);
1079 	if (asoc->nr_mapping_array == NULL) {
1080 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1081 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1082 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1083 		return (ENOMEM);
1084 	}
1085 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1086 
1087 	/* Now the init of the other outqueues */
1088 	TAILQ_INIT(&asoc->free_chunks);
1089 	TAILQ_INIT(&asoc->control_send_queue);
1090 	TAILQ_INIT(&asoc->asconf_send_queue);
1091 	TAILQ_INIT(&asoc->send_queue);
1092 	TAILQ_INIT(&asoc->sent_queue);
1093 	TAILQ_INIT(&asoc->reasmqueue);
1094 	TAILQ_INIT(&asoc->resetHead);
1095 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1096 	TAILQ_INIT(&asoc->asconf_queue);
1097 	/* authentication fields */
1098 	asoc->authinfo.random = NULL;
1099 	asoc->authinfo.active_keyid = 0;
1100 	asoc->authinfo.assoc_key = NULL;
1101 	asoc->authinfo.assoc_keyid = 0;
1102 	asoc->authinfo.recv_key = NULL;
1103 	asoc->authinfo.recv_keyid = 0;
1104 	LIST_INIT(&asoc->shared_keys);
1105 	asoc->marked_retrans = 0;
1106 	asoc->port = m->sctp_ep.port;
1107 	asoc->timoinit = 0;
1108 	asoc->timodata = 0;
1109 	asoc->timosack = 0;
1110 	asoc->timoshutdown = 0;
1111 	asoc->timoheartbeat = 0;
1112 	asoc->timocookie = 0;
1113 	asoc->timoshutdownack = 0;
1114 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1115 	asoc->discontinuity_time = asoc->start_time;
1116 	/*
1117 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1118 	 * freed later when the association is freed.
1119 	 */
1120 	return (0);
1121 }
1122 
1123 void
1124 sctp_print_mapping_array(struct sctp_association *asoc)
1125 {
1126 	unsigned int i, limit;
1127 
1128 	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1129 	    asoc->mapping_array_size,
1130 	    asoc->mapping_array_base_tsn,
1131 	    asoc->cumulative_tsn,
1132 	    asoc->highest_tsn_inside_map,
1133 	    asoc->highest_tsn_inside_nr_map);
1134 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1135 		if (asoc->mapping_array[limit - 1] != 0) {
1136 			break;
1137 		}
1138 	}
1139 	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1140 	for (i = 0; i < limit; i++) {
1141 		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1142 	}
1143 	if (limit % 16)
1144 		SCTP_PRINTF("\n");
1145 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1146 		if (asoc->nr_mapping_array[limit - 1]) {
1147 			break;
1148 		}
1149 	}
1150 	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1151 	for (i = 0; i < limit; i++) {
1152 		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1153 	}
1154 	if (limit % 16)
1155 		SCTP_PRINTF("\n");
1156 }
1157 
1158 int
1159 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1160 {
1161 	/* mapping array needs to grow */
1162 	uint8_t *new_array1, *new_array2;
1163 	uint32_t new_size;
1164 
1165 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1166 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1167 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1168 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1169 		/* can't get more, forget it */
1170 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1171 		if (new_array1) {
1172 			SCTP_FREE(new_array1, SCTP_M_MAP);
1173 		}
1174 		if (new_array2) {
1175 			SCTP_FREE(new_array2, SCTP_M_MAP);
1176 		}
1177 		return (-1);
1178 	}
1179 	memset(new_array1, 0, new_size);
1180 	memset(new_array2, 0, new_size);
1181 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1182 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1183 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1184 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1185 	asoc->mapping_array = new_array1;
1186 	asoc->nr_mapping_array = new_array2;
1187 	asoc->mapping_array_size = new_size;
1188 	return (0);
1189 }
1190 
1191 
/*
 * Core of the asynchronous PCB/association iterator.  Walks every
 * endpoint (or a single one, with SCTP_ITERATOR_DO_SINGLE_INP) whose
 * flags/features match the iterator's criteria, and invokes the
 * registered per-endpoint and per-association callbacks.  Consumes the
 * iterator: on completion the at-end callback is run and "it" is freed.
 *
 * Lock order here is INP_INFO read lock -> ITERATOR lock -> INP read
 * lock -> TCB lock.  Periodically (every SCTP_ITERATOR_MAX_AT_ONCE
 * associations) all locks are dropped and reacquired so other threads
 * can make progress; the refcnt bumps below keep the inp/stcb alive
 * across those windows.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		/* drop the reference taken when the iterator was queued */
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* it->inp is already read-locked from above */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* per-endpoint callback asked to skip, or no assocs here */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/*
			 * While the locks were dropped, someone may have
			 * asked this iterator (or its current inp) to stop.
			 */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1339 
1340 void
1341 sctp_iterator_worker(void)
1342 {
1343 	struct sctp_iterator *it, *nit;
1344 
1345 	/* This function is called with the WQ lock in place */
1346 
1347 	sctp_it_ctl.iterator_running = 1;
1348 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1349 		sctp_it_ctl.cur_it = it;
1350 		/* now lets work on this one */
1351 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1352 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1353 		CURVNET_SET(it->vn);
1354 		sctp_iterator_work(it);
1355 		sctp_it_ctl.cur_it = NULL;
1356 		CURVNET_RESTORE();
1357 		SCTP_IPI_ITERATOR_WQ_LOCK();
1358 		/* sa_ignore FREED_MEMORY */
1359 	}
1360 	sctp_it_ctl.iterator_running = 0;
1361 	return;
1362 }
1363 
1364 
1365 static void
1366 sctp_handle_addr_wq(void)
1367 {
1368 	/* deal with the ADDR wq from the rtsock calls */
1369 	struct sctp_laddr *wi, *nwi;
1370 	struct sctp_asconf_iterator *asc;
1371 
1372 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1373 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1374 	if (asc == NULL) {
1375 		/* Try later, no memory */
1376 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1377 		    (struct sctp_inpcb *)NULL,
1378 		    (struct sctp_tcb *)NULL,
1379 		    (struct sctp_nets *)NULL);
1380 		return;
1381 	}
1382 	LIST_INIT(&asc->list_of_work);
1383 	asc->cnt = 0;
1384 
1385 	SCTP_WQ_ADDR_LOCK();
1386 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1387 		LIST_REMOVE(wi, sctp_nxt_addr);
1388 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1389 		asc->cnt++;
1390 	}
1391 	SCTP_WQ_ADDR_UNLOCK();
1392 
1393 	if (asc->cnt == 0) {
1394 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1395 	} else {
1396 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1397 		    sctp_asconf_iterator_stcb,
1398 		    NULL,	/* No ep end for boundall */
1399 		    SCTP_PCB_FLAGS_BOUNDALL,
1400 		    SCTP_PCB_ANY_FEATURES,
1401 		    SCTP_ASOC_ANY_STATE,
1402 		    (void *)asc, 0,
1403 		    sctp_asconf_iterator_end, NULL, 0);
1404 	}
1405 }
1406 
/*
 * Common callout handler for every SCTP timer type.  "t" points at the
 * struct sctp_timer embedded in the owning object; from it we recover
 * the endpoint (inp), association (stcb) and destination (net) the
 * timer was armed for.  The function validates the timer is still
 * live, takes the needed references and the TCB lock, dispatches on
 * tmr->type to the per-type timer routine, then releases everything.
 * Several handlers can destroy the stcb or inp; those paths jump to
 * out_decr/out_no_decr so the cleanup below never touches freed state.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	/*
	 * stopped_from is breadcrumb state recording how far this handler
	 * got (or which early-out fired) for post-mortem debugging.
	 */
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * With no socket left, only timers that can run to
		 * completion of the close are still serviced.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Hold the association while we validate and lock it. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* timer was stopped while this handler was pending */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what timeout type occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed-SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			/* re-arm and push out the heartbeat */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			/* rotate the endpoint's cookie secret keys */
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		/* shutdown took too long: abort the association */
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1846 
1847 void
1848 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1849     struct sctp_nets *net)
1850 {
1851 	uint32_t to_ticks;
1852 	struct sctp_timer *tmr;
1853 
1854 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1855 		return;
1856 
1857 	tmr = NULL;
1858 	if (stcb) {
1859 		SCTP_TCB_LOCK_ASSERT(stcb);
1860 	}
1861 	switch (t_type) {
1862 	case SCTP_TIMER_TYPE_ZERO_COPY:
1863 		tmr = &inp->sctp_ep.zero_copy_timer;
1864 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1865 		break;
1866 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1867 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1868 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1869 		break;
1870 	case SCTP_TIMER_TYPE_ADDR_WQ:
1871 		/* Only 1 tick away :-) */
1872 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1873 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1874 		break;
1875 	case SCTP_TIMER_TYPE_SEND:
1876 		/* Here we use the RTO timer */
1877 		{
1878 			int rto_val;
1879 
1880 			if ((stcb == NULL) || (net == NULL)) {
1881 				return;
1882 			}
1883 			tmr = &net->rxt_timer;
1884 			if (net->RTO == 0) {
1885 				rto_val = stcb->asoc.initial_rto;
1886 			} else {
1887 				rto_val = net->RTO;
1888 			}
1889 			to_ticks = MSEC_TO_TICKS(rto_val);
1890 		}
1891 		break;
1892 	case SCTP_TIMER_TYPE_INIT:
1893 		/*
1894 		 * Here we use the INIT timer default usually about 1
1895 		 * minute.
1896 		 */
1897 		if ((stcb == NULL) || (net == NULL)) {
1898 			return;
1899 		}
1900 		tmr = &net->rxt_timer;
1901 		if (net->RTO == 0) {
1902 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1903 		} else {
1904 			to_ticks = MSEC_TO_TICKS(net->RTO);
1905 		}
1906 		break;
1907 	case SCTP_TIMER_TYPE_RECV:
1908 		/*
1909 		 * Here we use the Delayed-Ack timer value from the inp
1910 		 * ususually about 200ms.
1911 		 */
1912 		if (stcb == NULL) {
1913 			return;
1914 		}
1915 		tmr = &stcb->asoc.dack_timer;
1916 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1917 		break;
1918 	case SCTP_TIMER_TYPE_SHUTDOWN:
1919 		/* Here we use the RTO of the destination. */
1920 		if ((stcb == NULL) || (net == NULL)) {
1921 			return;
1922 		}
1923 		if (net->RTO == 0) {
1924 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1925 		} else {
1926 			to_ticks = MSEC_TO_TICKS(net->RTO);
1927 		}
1928 		tmr = &net->rxt_timer;
1929 		break;
1930 	case SCTP_TIMER_TYPE_HEARTBEAT:
1931 		/*
1932 		 * the net is used here so that we can add in the RTO. Even
1933 		 * though we use a different timer. We also add the HB timer
1934 		 * PLUS a random jitter.
1935 		 */
1936 		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
1937 			return;
1938 		} else {
1939 			uint32_t rndval;
1940 			uint32_t jitter;
1941 
1942 			if ((net->dest_state & SCTP_ADDR_NOHB) &&
1943 			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
1944 				return;
1945 			}
1946 			if (net->RTO == 0) {
1947 				to_ticks = stcb->asoc.initial_rto;
1948 			} else {
1949 				to_ticks = net->RTO;
1950 			}
1951 			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
1952 			jitter = rndval % to_ticks;
1953 			if (jitter >= (to_ticks >> 1)) {
1954 				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
1955 			} else {
1956 				to_ticks = to_ticks - jitter;
1957 			}
1958 			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1959 			    !(net->dest_state & SCTP_ADDR_PF)) {
1960 				to_ticks += net->heart_beat_delay;
1961 			}
1962 			/*
1963 			 * Now we must convert the to_ticks that are now in
1964 			 * ms to ticks.
1965 			 */
1966 			to_ticks = MSEC_TO_TICKS(to_ticks);
1967 			tmr = &net->hb_timer;
1968 		}
1969 		break;
1970 	case SCTP_TIMER_TYPE_COOKIE:
1971 		/*
1972 		 * Here we can use the RTO timer from the network since one
1973 		 * RTT was compelete. If a retran happened then we will be
1974 		 * using the RTO initial value.
1975 		 */
1976 		if ((stcb == NULL) || (net == NULL)) {
1977 			return;
1978 		}
1979 		if (net->RTO == 0) {
1980 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1981 		} else {
1982 			to_ticks = MSEC_TO_TICKS(net->RTO);
1983 		}
1984 		tmr = &net->rxt_timer;
1985 		break;
1986 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1987 		/*
1988 		 * nothing needed but the endpoint here ususually about 60
1989 		 * minutes.
1990 		 */
1991 		if (inp == NULL) {
1992 			return;
1993 		}
1994 		tmr = &inp->sctp_ep.signature_change;
1995 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
1996 		break;
1997 	case SCTP_TIMER_TYPE_ASOCKILL:
1998 		if (stcb == NULL) {
1999 			return;
2000 		}
2001 		tmr = &stcb->asoc.strreset_timer;
2002 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2003 		break;
2004 	case SCTP_TIMER_TYPE_INPKILL:
2005 		/*
2006 		 * The inp is setup to die. We re-use the signature_chage
2007 		 * timer since that has stopped and we are in the GONE
2008 		 * state.
2009 		 */
2010 		if (inp == NULL) {
2011 			return;
2012 		}
2013 		tmr = &inp->sctp_ep.signature_change;
2014 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2015 		break;
2016 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2017 		/*
2018 		 * Here we use the value found in the EP for PMTU ususually
2019 		 * about 10 minutes.
2020 		 */
2021 		if ((stcb == NULL) || (inp == NULL)) {
2022 			return;
2023 		}
2024 		if (net == NULL) {
2025 			return;
2026 		}
2027 		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
2028 			return;
2029 		}
2030 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2031 		tmr = &net->pmtu_timer;
2032 		break;
2033 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2034 		/* Here we use the RTO of the destination */
2035 		if ((stcb == NULL) || (net == NULL)) {
2036 			return;
2037 		}
2038 		if (net->RTO == 0) {
2039 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2040 		} else {
2041 			to_ticks = MSEC_TO_TICKS(net->RTO);
2042 		}
2043 		tmr = &net->rxt_timer;
2044 		break;
2045 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2046 		/*
2047 		 * Here we use the endpoints shutdown guard timer usually
2048 		 * about 3 minutes.
2049 		 */
2050 		if ((inp == NULL) || (stcb == NULL)) {
2051 			return;
2052 		}
2053 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2054 		tmr = &stcb->asoc.shut_guard_timer;
2055 		break;
2056 	case SCTP_TIMER_TYPE_STRRESET:
2057 		/*
2058 		 * Here the timer comes from the stcb but its value is from
2059 		 * the net's RTO.
2060 		 */
2061 		if ((stcb == NULL) || (net == NULL)) {
2062 			return;
2063 		}
2064 		if (net->RTO == 0) {
2065 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2066 		} else {
2067 			to_ticks = MSEC_TO_TICKS(net->RTO);
2068 		}
2069 		tmr = &stcb->asoc.strreset_timer;
2070 		break;
2071 	case SCTP_TIMER_TYPE_ASCONF:
2072 		/*
2073 		 * Here the timer comes from the stcb but its value is from
2074 		 * the net's RTO.
2075 		 */
2076 		if ((stcb == NULL) || (net == NULL)) {
2077 			return;
2078 		}
2079 		if (net->RTO == 0) {
2080 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2081 		} else {
2082 			to_ticks = MSEC_TO_TICKS(net->RTO);
2083 		}
2084 		tmr = &stcb->asoc.asconf_timer;
2085 		break;
2086 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2087 		if ((stcb == NULL) || (net != NULL)) {
2088 			return;
2089 		}
2090 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2091 		tmr = &stcb->asoc.delete_prim_timer;
2092 		break;
2093 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2094 		if (stcb == NULL) {
2095 			return;
2096 		}
2097 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2098 			/*
2099 			 * Really an error since stcb is NOT set to
2100 			 * autoclose
2101 			 */
2102 			return;
2103 		}
2104 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2105 		tmr = &stcb->asoc.autoclose_timer;
2106 		break;
2107 	default:
2108 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2109 		    __FUNCTION__, t_type);
2110 		return;
2111 		break;
2112 	}
2113 	if ((to_ticks <= 0) || (tmr == NULL)) {
2114 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2115 		    __FUNCTION__, t_type, to_ticks, tmr);
2116 		return;
2117 	}
2118 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2119 		/*
2120 		 * we do NOT allow you to have it already running. if it is
2121 		 * we leave the current one up unchanged
2122 		 */
2123 		return;
2124 	}
2125 	/* At this point we can proceed */
2126 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2127 		stcb->asoc.num_send_timers_up++;
2128 	}
2129 	tmr->stopped_from = 0;
2130 	tmr->type = t_type;
2131 	tmr->ep = (void *)inp;
2132 	tmr->tcb = (void *)stcb;
2133 	tmr->net = (void *)net;
2134 	tmr->self = (void *)tmr;
2135 	tmr->vnet = (void *)curvnet;
2136 	tmr->ticks = sctp_get_tick_count();
2137 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2138 	return;
2139 }
2140 
2141 void
2142 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2143     struct sctp_nets *net, uint32_t from)
2144 {
2145 	struct sctp_timer *tmr;
2146 
2147 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2148 	    (inp == NULL))
2149 		return;
2150 
2151 	tmr = NULL;
2152 	if (stcb) {
2153 		SCTP_TCB_LOCK_ASSERT(stcb);
2154 	}
2155 	switch (t_type) {
2156 	case SCTP_TIMER_TYPE_ZERO_COPY:
2157 		tmr = &inp->sctp_ep.zero_copy_timer;
2158 		break;
2159 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2160 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2161 		break;
2162 	case SCTP_TIMER_TYPE_ADDR_WQ:
2163 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2164 		break;
2165 	case SCTP_TIMER_TYPE_SEND:
2166 		if ((stcb == NULL) || (net == NULL)) {
2167 			return;
2168 		}
2169 		tmr = &net->rxt_timer;
2170 		break;
2171 	case SCTP_TIMER_TYPE_INIT:
2172 		if ((stcb == NULL) || (net == NULL)) {
2173 			return;
2174 		}
2175 		tmr = &net->rxt_timer;
2176 		break;
2177 	case SCTP_TIMER_TYPE_RECV:
2178 		if (stcb == NULL) {
2179 			return;
2180 		}
2181 		tmr = &stcb->asoc.dack_timer;
2182 		break;
2183 	case SCTP_TIMER_TYPE_SHUTDOWN:
2184 		if ((stcb == NULL) || (net == NULL)) {
2185 			return;
2186 		}
2187 		tmr = &net->rxt_timer;
2188 		break;
2189 	case SCTP_TIMER_TYPE_HEARTBEAT:
2190 		if ((stcb == NULL) || (net == NULL)) {
2191 			return;
2192 		}
2193 		tmr = &net->hb_timer;
2194 		break;
2195 	case SCTP_TIMER_TYPE_COOKIE:
2196 		if ((stcb == NULL) || (net == NULL)) {
2197 			return;
2198 		}
2199 		tmr = &net->rxt_timer;
2200 		break;
2201 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2202 		/* nothing needed but the endpoint here */
2203 		tmr = &inp->sctp_ep.signature_change;
2204 		/*
2205 		 * We re-use the newcookie timer for the INP kill timer. We
2206 		 * must assure that we do not kill it by accident.
2207 		 */
2208 		break;
2209 	case SCTP_TIMER_TYPE_ASOCKILL:
2210 		/*
2211 		 * Stop the asoc kill timer.
2212 		 */
2213 		if (stcb == NULL) {
2214 			return;
2215 		}
2216 		tmr = &stcb->asoc.strreset_timer;
2217 		break;
2218 
2219 	case SCTP_TIMER_TYPE_INPKILL:
2220 		/*
2221 		 * The inp is setup to die. We re-use the signature_chage
2222 		 * timer since that has stopped and we are in the GONE
2223 		 * state.
2224 		 */
2225 		tmr = &inp->sctp_ep.signature_change;
2226 		break;
2227 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2228 		if ((stcb == NULL) || (net == NULL)) {
2229 			return;
2230 		}
2231 		tmr = &net->pmtu_timer;
2232 		break;
2233 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2234 		if ((stcb == NULL) || (net == NULL)) {
2235 			return;
2236 		}
2237 		tmr = &net->rxt_timer;
2238 		break;
2239 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2240 		if (stcb == NULL) {
2241 			return;
2242 		}
2243 		tmr = &stcb->asoc.shut_guard_timer;
2244 		break;
2245 	case SCTP_TIMER_TYPE_STRRESET:
2246 		if (stcb == NULL) {
2247 			return;
2248 		}
2249 		tmr = &stcb->asoc.strreset_timer;
2250 		break;
2251 	case SCTP_TIMER_TYPE_ASCONF:
2252 		if (stcb == NULL) {
2253 			return;
2254 		}
2255 		tmr = &stcb->asoc.asconf_timer;
2256 		break;
2257 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2258 		if (stcb == NULL) {
2259 			return;
2260 		}
2261 		tmr = &stcb->asoc.delete_prim_timer;
2262 		break;
2263 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2264 		if (stcb == NULL) {
2265 			return;
2266 		}
2267 		tmr = &stcb->asoc.autoclose_timer;
2268 		break;
2269 	default:
2270 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2271 		    __FUNCTION__, t_type);
2272 		break;
2273 	}
2274 	if (tmr == NULL) {
2275 		return;
2276 	}
2277 	if ((tmr->type != t_type) && tmr->type) {
2278 		/*
2279 		 * Ok we have a timer that is under joint use. Cookie timer
2280 		 * per chance with the SEND timer. We therefore are NOT
2281 		 * running the timer that the caller wants stopped.  So just
2282 		 * return.
2283 		 */
2284 		return;
2285 	}
2286 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2287 		stcb->asoc.num_send_timers_up--;
2288 		if (stcb->asoc.num_send_timers_up < 0) {
2289 			stcb->asoc.num_send_timers_up = 0;
2290 		}
2291 	}
2292 	tmr->self = NULL;
2293 	tmr->stopped_from = from;
2294 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2295 	return;
2296 }
2297 
2298 uint32_t
2299 sctp_calculate_len(struct mbuf *m)
2300 {
2301 	uint32_t tlen = 0;
2302 	struct mbuf *at;
2303 
2304 	at = m;
2305 	while (at) {
2306 		tlen += SCTP_BUF_LEN(at);
2307 		at = SCTP_BUF_NEXT(at);
2308 	}
2309 	return (tlen);
2310 }
2311 
2312 void
2313 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2314     struct sctp_association *asoc, uint32_t mtu)
2315 {
2316 	/*
2317 	 * Reset the P-MTU size on this association, this involves changing
2318 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2319 	 * allow the DF flag to be cleared.
2320 	 */
2321 	struct sctp_tmit_chunk *chk;
2322 	unsigned int eff_mtu, ovh;
2323 
2324 	asoc->smallest_mtu = mtu;
2325 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2326 		ovh = SCTP_MIN_OVERHEAD;
2327 	} else {
2328 		ovh = SCTP_MIN_V4_OVERHEAD;
2329 	}
2330 	eff_mtu = mtu - ovh;
2331 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2332 		if (chk->send_size > eff_mtu) {
2333 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2334 		}
2335 	}
2336 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2337 		if (chk->send_size > eff_mtu) {
2338 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2339 		}
2340 	}
2341 }
2342 
2343 
2344 /*
2345  * given an association and starting time of the current RTT period return
2346  * RTO in number of msecs net should point to the current network
2347  */
2348 
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe, int rtt_from_sack)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 *
	 * 'safe' says whether 'told' may be dereferenced in place
	 * (sctp_align_safe_nocopy) or must first be copied into an aligned
	 * local (sctp_align_unsafe_makecopy, needed on strict-alignment
	 * platforms such as sparc64); any other value is rejected.
	 * 'rtt_from_sack' says whether the sample came from a SACK of DATA
	 * (SCTP_RTT_FROM_DATA) or from a control exchange (HB/INIT).
	 * Side effects: updates net->rtt (in usec), the scaled smoothed
	 * RTT/variance net->lastsa/net->lastsv, net->lan_type, and the
	 * association's satellite-network flags.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	/* now = elapsed time since 'old', the start of the RTT period */
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 *(uint64_t) now.tv_sec +
	        (uint64_t) now.tv_usec;

	/* computer rtt in ms */
	rtt = net->rtt / 1000;
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsv >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* rtt becomes the error term: sample minus current srtt */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		/* |error| minus current rttvar feeds the variance estimator */
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		first_measure = 1;
		/* seed srtt with the first sample and rttvar with sample/2 */
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	if (net->lastsv == 0) {
		/* never let the variance estimate collapse to zero */
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	/* new RTO = smoothed RTT plus the (scaled) variance term */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2464 
2465 /*
2466  * return a pointer to a contiguous piece of data from the given mbuf chain
2467  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2468  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2469  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2470  */
2471 caddr_t
2472 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2473 {
2474 	uint32_t count;
2475 	uint8_t *ptr;
2476 
2477 	ptr = in_ptr;
2478 	if ((off < 0) || (len <= 0))
2479 		return (NULL);
2480 
2481 	/* find the desired start location */
2482 	while ((m != NULL) && (off > 0)) {
2483 		if (off < SCTP_BUF_LEN(m))
2484 			break;
2485 		off -= SCTP_BUF_LEN(m);
2486 		m = SCTP_BUF_NEXT(m);
2487 	}
2488 	if (m == NULL)
2489 		return (NULL);
2490 
2491 	/* is the current mbuf large enough (eg. contiguous)? */
2492 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2493 		return (mtod(m, caddr_t)+off);
2494 	} else {
2495 		/* else, it spans more than one mbuf, so save a temp copy... */
2496 		while ((m != NULL) && (len > 0)) {
2497 			count = min(SCTP_BUF_LEN(m) - off, len);
2498 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2499 			len -= count;
2500 			ptr += count;
2501 			off = 0;
2502 			m = SCTP_BUF_NEXT(m);
2503 		}
2504 		if ((m == NULL) && (len > 0))
2505 			return (NULL);
2506 		else
2507 			return ((caddr_t)in_ptr);
2508 	}
2509 }
2510 
2511 
2512 
2513 struct sctp_paramhdr *
2514 sctp_get_next_param(struct mbuf *m,
2515     int offset,
2516     struct sctp_paramhdr *pull,
2517     int pull_limit)
2518 {
2519 	/* This just provides a typed signature to Peter's Pull routine */
2520 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2521 	    (uint8_t *) pull));
2522 }
2523 
2524 
2525 int
2526 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2527 {
2528 	/*
2529 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2530 	 * padlen is > 3 this routine will fail.
2531 	 */
2532 	uint8_t *dp;
2533 	int i;
2534 
2535 	if (padlen > 3) {
2536 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2537 		return (ENOBUFS);
2538 	}
2539 	if (padlen <= M_TRAILINGSPACE(m)) {
2540 		/*
2541 		 * The easy way. We hope the majority of the time we hit
2542 		 * here :)
2543 		 */
2544 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2545 		SCTP_BUF_LEN(m) += padlen;
2546 	} else {
2547 		/* Hard way we must grow the mbuf */
2548 		struct mbuf *tmp;
2549 
2550 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2551 		if (tmp == NULL) {
2552 			/* Out of space GAK! we are in big trouble. */
2553 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2554 			return (ENOBUFS);
2555 		}
2556 		/* setup and insert in middle */
2557 		SCTP_BUF_LEN(tmp) = padlen;
2558 		SCTP_BUF_NEXT(tmp) = NULL;
2559 		SCTP_BUF_NEXT(m) = tmp;
2560 		dp = mtod(tmp, uint8_t *);
2561 	}
2562 	/* zero out the pad */
2563 	for (i = 0; i < padlen; i++) {
2564 		*dp = 0;
2565 		dp++;
2566 	}
2567 	return (0);
2568 }
2569 
2570 int
2571 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2572 {
2573 	/* find the last mbuf in chain and pad it */
2574 	struct mbuf *m_at;
2575 
2576 	if (last_mbuf) {
2577 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2578 	} else {
2579 		for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) {
2580 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2581 				return (sctp_add_pad_tombuf(m_at, padval));
2582 			}
2583 		}
2584 	}
2585 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2586 	return (EFAULT);
2587 }
2588 
2589 static void
2590 sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb,
2591     uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked
2592 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
2593     SCTP_UNUSED
2594 #endif
2595 )
2596 {
2597 	struct mbuf *m_notify;
2598 	struct sctp_assoc_change *sac;
2599 	struct sctp_queued_to_read *control;
2600 	size_t notif_len, abort_len;
2601 	unsigned int i;
2602 
2603 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2604 	struct socket *so;
2605 
2606 #endif
2607 
2608 	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
2609 		notif_len = sizeof(struct sctp_assoc_change);
2610 		if (abort != NULL) {
2611 			abort_len = htons(abort->ch.chunk_length);
2612 		} else {
2613 			abort_len = 0;
2614 		}
2615 		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2616 			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
2617 		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2618 			notif_len += abort_len;
2619 		}
2620 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
2621 		if (m_notify == NULL) {
2622 			/* Retry with smaller value. */
2623 			notif_len = sizeof(struct sctp_assoc_change);
2624 			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
2625 			if (m_notify == NULL) {
2626 				goto set_error;
2627 			}
2628 		}
2629 		SCTP_BUF_NEXT(m_notify) = NULL;
2630 		sac = mtod(m_notify, struct sctp_assoc_change *);
2631 		sac->sac_type = SCTP_ASSOC_CHANGE;
2632 		sac->sac_flags = 0;
2633 		sac->sac_length = sizeof(struct sctp_assoc_change);
2634 		sac->sac_state = state;
2635 		sac->sac_error = error;
2636 		/* XXX verify these stream counts */
2637 		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
2638 		sac->sac_inbound_streams = stcb->asoc.streamincnt;
2639 		sac->sac_assoc_id = sctp_get_associd(stcb);
2640 		if (notif_len > sizeof(struct sctp_assoc_change)) {
2641 			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
2642 				i = 0;
2643 				if (stcb->asoc.peer_supports_prsctp) {
2644 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
2645 				}
2646 				if (stcb->asoc.peer_supports_auth) {
2647 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
2648 				}
2649 				if (stcb->asoc.peer_supports_asconf) {
2650 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
2651 				}
2652 				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
2653 				if (stcb->asoc.peer_supports_strreset) {
2654 					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
2655 				}
2656 				sac->sac_length += i;
2657 			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
2658 				memcpy(sac->sac_info, abort, abort_len);
2659 				sac->sac_length += abort_len;
2660 			}
2661 		}
2662 		SCTP_BUF_LEN(m_notify) = sac->sac_length;
2663 		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2664 		    0, 0, stcb->asoc.context, 0, 0, 0,
2665 		    m_notify);
2666 		if (control != NULL) {
2667 			control->length = SCTP_BUF_LEN(m_notify);
2668 			/* not that we need this */
2669 			control->tail_mbuf = m_notify;
2670 			control->spec_flags = M_NOTIFICATION;
2671 			sctp_add_to_readq(stcb->sctp_ep, stcb,
2672 			    control,
2673 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
2674 			    so_locked);
2675 		} else {
2676 			sctp_m_freem(m_notify);
2677 		}
2678 	}
2679 	/*
2680 	 * For 1-to-1 style sockets, we send up and error when an ABORT
2681 	 * comes in.
2682 	 */
2683 set_error:
2684 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2685 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2686 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2687 		if (from_peer) {
2688 			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
2689 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
2690 				stcb->sctp_socket->so_error = ECONNREFUSED;
2691 			} else {
2692 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
2693 				stcb->sctp_socket->so_error = ECONNRESET;
2694 			}
2695 		} else {
2696 			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
2697 			stcb->sctp_socket->so_error = ECONNABORTED;
2698 		}
2699 	}
2700 	/* Wake ANY sleepers */
2701 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2702 	so = SCTP_INP_SO(stcb->sctp_ep);
2703 	if (!so_locked) {
2704 		atomic_add_int(&stcb->asoc.refcnt, 1);
2705 		SCTP_TCB_UNLOCK(stcb);
2706 		SCTP_SOCKET_LOCK(so, 1);
2707 		SCTP_TCB_LOCK(stcb);
2708 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
2709 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
2710 			SCTP_SOCKET_UNLOCK(so, 1);
2711 			return;
2712 		}
2713 	}
2714 #endif
2715 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2716 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
2717 	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
2718 		socantrcvmore(stcb->sctp_socket);
2719 	}
2720 	sorwakeup(stcb->sctp_socket);
2721 	sowwakeup(stcb->sctp_socket);
2722 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
2723 	if (!so_locked) {
2724 		SCTP_SOCKET_UNLOCK(so, 1);
2725 	}
2726 #endif
2727 }
2728 
2729 static void
2730 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2731     struct sockaddr *sa, uint32_t error)
2732 {
2733 	struct mbuf *m_notify;
2734 	struct sctp_paddr_change *spc;
2735 	struct sctp_queued_to_read *control;
2736 
2737 	if ((stcb == NULL) ||
2738 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2739 		/* event not enabled */
2740 		return;
2741 	}
2742 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2743 	if (m_notify == NULL)
2744 		return;
2745 	SCTP_BUF_LEN(m_notify) = 0;
2746 	spc = mtod(m_notify, struct sctp_paddr_change *);
2747 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2748 	spc->spc_flags = 0;
2749 	spc->spc_length = sizeof(struct sctp_paddr_change);
2750 	switch (sa->sa_family) {
2751 #ifdef INET
2752 	case AF_INET:
2753 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2754 		break;
2755 #endif
2756 #ifdef INET6
2757 	case AF_INET6:
2758 		{
2759 			struct sockaddr_in6 *sin6;
2760 
2761 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2762 
2763 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2764 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2765 				if (sin6->sin6_scope_id == 0) {
2766 					/* recover scope_id for user */
2767 					(void)sa6_recoverscope(sin6);
2768 				} else {
2769 					/* clear embedded scope_id for user */
2770 					in6_clearscope(&sin6->sin6_addr);
2771 				}
2772 			}
2773 			break;
2774 		}
2775 #endif
2776 	default:
2777 		/* TSNH */
2778 		break;
2779 	}
2780 	spc->spc_state = state;
2781 	spc->spc_error = error;
2782 	spc->spc_assoc_id = sctp_get_associd(stcb);
2783 
2784 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2785 	SCTP_BUF_NEXT(m_notify) = NULL;
2786 
2787 	/* append to socket */
2788 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2789 	    0, 0, stcb->asoc.context, 0, 0, 0,
2790 	    m_notify);
2791 	if (control == NULL) {
2792 		/* no memory */
2793 		sctp_m_freem(m_notify);
2794 		return;
2795 	}
2796 	control->length = SCTP_BUF_LEN(m_notify);
2797 	control->spec_flags = M_NOTIFICATION;
2798 	/* not that we need this */
2799 	control->tail_mbuf = m_notify;
2800 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2801 	    control,
2802 	    &stcb->sctp_socket->so_rcv, 1,
2803 	    SCTP_READ_LOCK_NOT_HELD,
2804 	    SCTP_SO_NOT_LOCKED);
2805 }
2806 
2807 
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Queue an SCTP_SEND_FAILED (old API) or SCTP_SEND_FAILED_EVENT
	 * (new API) notification for the DATA chunk 'chk' on the socket's
	 * read queue.  'sent' selects the SCTP_DATA_SENT vs
	 * SCTP_DATA_UNSENT flag.  The chunk's data mbufs are stolen
	 * (chk->data is set to NULL) and appended to the notification so
	 * the user gets the failed payload back.
	 */
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* new-style event takes precedence when both features are on */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * Reported length = header + user payload (chunk size minus the
	 * DATA chunk header).
	 * NOTE(review): if chk->send_size < sizeof(struct sctp_data_chunk)
	 * this underflows; callers appear to pass complete DATA chunks --
	 * confirm.
	 */
	length += chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		if (sent) {
			ssfe->ssfe_flags = SCTP_DATA_SENT;
		} else {
			ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		}
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
		ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number;
		ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags;
		ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype;
		ssfe->ssfe_info.snd_context = chk->rec.data.context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		ssf->ssf_type = SCTP_SEND_FAILED;
		if (sent) {
			ssf->ssf_flags = SCTP_DATA_SENT;
		} else {
			ssf->ssf_flags = SCTP_DATA_UNSENT;
		}
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
		ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
		ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
		ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
		ssf->ssf_info.sinfo_context = chk->rec.data.context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	/* chain the (now headerless) payload behind the notification */
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2918 
2919 
/*
 * Queue a send-failure notification for a message that never left the
 * stream queue (an sctp_stream_queue_pending).  Depending on which event
 * the application subscribed to, this builds either the newer
 * SCTP_SEND_FAILED_EVENT (RECVNSENDFAILEVNT) or the legacy
 * SCTP_SEND_FAILED (RECVSENDFAILEVNT) notification, steals sp->data onto
 * the notification mbuf chain, and appends it to the socket read queue.
 * so_locked tells sctp_add_to_readq whether the socket lock is held.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	/* Pick the header size for whichever notification format is enabled. */
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	/*
	 * The advertised notification length covers header plus user data;
	 * only the header lives in m_notify, the data is chained on below.
	 */
	length += sp->length;
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssfe->ssfe_info, sizeof(ssfe->ssfe_info));
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			/* part of the message was already moved off; this is the tail */
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = sp->strseq;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* freeing m_notify also frees the stolen user data chained behind it */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3019 
3020 
3021 
3022 static void
3023 sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
3024 {
3025 	struct mbuf *m_notify;
3026 	struct sctp_adaptation_event *sai;
3027 	struct sctp_queued_to_read *control;
3028 
3029 	if ((stcb == NULL) ||
3030 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3031 		/* event not enabled */
3032 		return;
3033 	}
3034 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3035 	if (m_notify == NULL)
3036 		/* no space left */
3037 		return;
3038 	SCTP_BUF_LEN(m_notify) = 0;
3039 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3040 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3041 	sai->sai_flags = 0;
3042 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3043 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3044 	sai->sai_assoc_id = sctp_get_associd(stcb);
3045 
3046 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3047 	SCTP_BUF_NEXT(m_notify) = NULL;
3048 
3049 	/* append to socket */
3050 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3051 	    0, 0, stcb->asoc.context, 0, 0, 0,
3052 	    m_notify);
3053 	if (control == NULL) {
3054 		/* no memory */
3055 		sctp_m_freem(m_notify);
3056 		return;
3057 	}
3058 	control->length = SCTP_BUF_LEN(m_notify);
3059 	control->spec_flags = M_NOTIFICATION;
3060 	/* not that we need this */
3061 	control->tail_mbuf = m_notify;
3062 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3063 	    control,
3064 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3065 }
3066 
3067 /* This always must be called with the read-queue LOCKED in the INP */
3068 static void
3069 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3070     uint32_t val, int so_locked
3071 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3072     SCTP_UNUSED
3073 #endif
3074 )
3075 {
3076 	struct mbuf *m_notify;
3077 	struct sctp_pdapi_event *pdapi;
3078 	struct sctp_queued_to_read *control;
3079 	struct sockbuf *sb;
3080 
3081 	if ((stcb == NULL) ||
3082 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3083 		/* event not enabled */
3084 		return;
3085 	}
3086 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3087 		return;
3088 	}
3089 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3090 	if (m_notify == NULL)
3091 		/* no space left */
3092 		return;
3093 	SCTP_BUF_LEN(m_notify) = 0;
3094 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3095 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3096 	pdapi->pdapi_flags = 0;
3097 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3098 	pdapi->pdapi_indication = error;
3099 	pdapi->pdapi_stream = (val >> 16);
3100 	pdapi->pdapi_seq = (val & 0x0000ffff);
3101 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3102 
3103 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3104 	SCTP_BUF_NEXT(m_notify) = NULL;
3105 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3106 	    0, 0, stcb->asoc.context, 0, 0, 0,
3107 	    m_notify);
3108 	if (control == NULL) {
3109 		/* no memory */
3110 		sctp_m_freem(m_notify);
3111 		return;
3112 	}
3113 	control->spec_flags = M_NOTIFICATION;
3114 	control->length = SCTP_BUF_LEN(m_notify);
3115 	/* not that we need this */
3116 	control->tail_mbuf = m_notify;
3117 	control->held_length = 0;
3118 	control->length = 0;
3119 	sb = &stcb->sctp_socket->so_rcv;
3120 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3121 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3122 	}
3123 	sctp_sballoc(stcb, sb, m_notify);
3124 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3125 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3126 	}
3127 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3128 	control->end_added = 1;
3129 	if (stcb->asoc.control_pdapi)
3130 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3131 	else {
3132 		/* we really should not see this case */
3133 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3134 	}
3135 	if (stcb->sctp_ep && stcb->sctp_socket) {
3136 		/* This should always be the case */
3137 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3138 		struct socket *so;
3139 
3140 		so = SCTP_INP_SO(stcb->sctp_ep);
3141 		if (!so_locked) {
3142 			atomic_add_int(&stcb->asoc.refcnt, 1);
3143 			SCTP_TCB_UNLOCK(stcb);
3144 			SCTP_SOCKET_LOCK(so, 1);
3145 			SCTP_TCB_LOCK(stcb);
3146 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3147 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3148 				SCTP_SOCKET_UNLOCK(so, 1);
3149 				return;
3150 			}
3151 		}
3152 #endif
3153 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3154 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3155 		if (!so_locked) {
3156 			SCTP_SOCKET_UNLOCK(so, 1);
3157 		}
3158 #endif
3159 	}
3160 }
3161 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification when a SHUTDOWN completes.
 * For one-to-one style (TCP-type) sockets the socket is also marked as
 * unable to send, waking any blocked writers, regardless of whether the
 * application subscribed to the shutdown event itself.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * On platforms that require the socket lock for
		 * socantsendmore(), drop the tcb lock, take the socket
		 * lock, and re-acquire — holding a refcount so the assoc
		 * survives the window where no lock is held.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket went away while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3229 
3230 static void
3231 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3232     int so_locked
3233 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3234     SCTP_UNUSED
3235 #endif
3236 )
3237 {
3238 	struct mbuf *m_notify;
3239 	struct sctp_sender_dry_event *event;
3240 	struct sctp_queued_to_read *control;
3241 
3242 	if ((stcb == NULL) ||
3243 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
3244 		/* event not enabled */
3245 		return;
3246 	}
3247 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3248 	if (m_notify == NULL) {
3249 		/* no space left */
3250 		return;
3251 	}
3252 	SCTP_BUF_LEN(m_notify) = 0;
3253 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3254 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3255 	event->sender_dry_flags = 0;
3256 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3257 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3258 
3259 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3260 	SCTP_BUF_NEXT(m_notify) = NULL;
3261 
3262 	/* append to socket */
3263 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3264 	    0, 0, stcb->asoc.context, 0, 0, 0,
3265 	    m_notify);
3266 	if (control == NULL) {
3267 		/* no memory */
3268 		sctp_m_freem(m_notify);
3269 		return;
3270 	}
3271 	control->length = SCTP_BUF_LEN(m_notify);
3272 	control->spec_flags = M_NOTIFICATION;
3273 	/* not that we need this */
3274 	control->tail_mbuf = m_notify;
3275 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3276 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3277 }
3278 
3279 
3280 void
3281 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
3282 {
3283 	struct mbuf *m_notify;
3284 	struct sctp_queued_to_read *control;
3285 	struct sctp_stream_change_event *stradd;
3286 	int len;
3287 
3288 	if ((stcb == NULL) ||
3289 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
3290 		/* event not enabled */
3291 		return;
3292 	}
3293 	if ((stcb->asoc.peer_req_out) && flag) {
3294 		/* Peer made the request, don't tell the local user */
3295 		stcb->asoc.peer_req_out = 0;
3296 		return;
3297 	}
3298 	stcb->asoc.peer_req_out = 0;
3299 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3300 	if (m_notify == NULL)
3301 		/* no space left */
3302 		return;
3303 	SCTP_BUF_LEN(m_notify) = 0;
3304 	len = sizeof(struct sctp_stream_change_event);
3305 	if (len > M_TRAILINGSPACE(m_notify)) {
3306 		/* never enough room */
3307 		sctp_m_freem(m_notify);
3308 		return;
3309 	}
3310 	stradd = mtod(m_notify, struct sctp_stream_change_event *);
3311 	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
3312 	stradd->strchange_flags = flag;
3313 	stradd->strchange_length = len;
3314 	stradd->strchange_assoc_id = sctp_get_associd(stcb);
3315 	stradd->strchange_instrms = numberin;
3316 	stradd->strchange_outstrms = numberout;
3317 	SCTP_BUF_LEN(m_notify) = len;
3318 	SCTP_BUF_NEXT(m_notify) = NULL;
3319 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3320 		/* no space */
3321 		sctp_m_freem(m_notify);
3322 		return;
3323 	}
3324 	/* append to socket */
3325 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3326 	    0, 0, stcb->asoc.context, 0, 0, 0,
3327 	    m_notify);
3328 	if (control == NULL) {
3329 		/* no memory */
3330 		sctp_m_freem(m_notify);
3331 		return;
3332 	}
3333 	control->spec_flags = M_NOTIFICATION;
3334 	control->length = SCTP_BUF_LEN(m_notify);
3335 	/* not that we need this */
3336 	control->tail_mbuf = m_notify;
3337 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3338 	    control,
3339 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3340 }
3341 
3342 void
3343 sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb, uint32_t sending_tsn, uint32_t recv_tsn, int flag)
3344 {
3345 	struct mbuf *m_notify;
3346 	struct sctp_queued_to_read *control;
3347 	struct sctp_assoc_reset_event *strasoc;
3348 	int len;
3349 
3350 	if ((stcb == NULL) ||
3351 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
3352 		/* event not enabled */
3353 		return;
3354 	}
3355 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3356 	if (m_notify == NULL)
3357 		/* no space left */
3358 		return;
3359 	SCTP_BUF_LEN(m_notify) = 0;
3360 	len = sizeof(struct sctp_assoc_reset_event);
3361 	if (len > M_TRAILINGSPACE(m_notify)) {
3362 		/* never enough room */
3363 		sctp_m_freem(m_notify);
3364 		return;
3365 	}
3366 	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
3367 	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
3368 	strasoc->assocreset_flags = flag;
3369 	strasoc->assocreset_length = len;
3370 	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
3371 	strasoc->assocreset_local_tsn = sending_tsn;
3372 	strasoc->assocreset_remote_tsn = recv_tsn;
3373 	SCTP_BUF_LEN(m_notify) = len;
3374 	SCTP_BUF_NEXT(m_notify) = NULL;
3375 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3376 		/* no space */
3377 		sctp_m_freem(m_notify);
3378 		return;
3379 	}
3380 	/* append to socket */
3381 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3382 	    0, 0, stcb->asoc.context, 0, 0, 0,
3383 	    m_notify);
3384 	if (control == NULL) {
3385 		/* no memory */
3386 		sctp_m_freem(m_notify);
3387 		return;
3388 	}
3389 	control->spec_flags = M_NOTIFICATION;
3390 	control->length = SCTP_BUF_LEN(m_notify);
3391 	/* not that we need this */
3392 	control->tail_mbuf = m_notify;
3393 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3394 	    control,
3395 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3396 }
3397 
3398 
3399 
3400 static void
3401 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3402     int number_entries, uint16_t * list, int flag)
3403 {
3404 	struct mbuf *m_notify;
3405 	struct sctp_queued_to_read *control;
3406 	struct sctp_stream_reset_event *strreset;
3407 	int len;
3408 
3409 	if ((stcb == NULL) ||
3410 	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) {
3411 		/* event not enabled */
3412 		return;
3413 	}
3414 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3415 	if (m_notify == NULL)
3416 		/* no space left */
3417 		return;
3418 	SCTP_BUF_LEN(m_notify) = 0;
3419 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3420 	if (len > M_TRAILINGSPACE(m_notify)) {
3421 		/* never enough room */
3422 		sctp_m_freem(m_notify);
3423 		return;
3424 	}
3425 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3426 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3427 	strreset->strreset_flags = flag;
3428 	strreset->strreset_length = len;
3429 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3430 	if (number_entries) {
3431 		int i;
3432 
3433 		for (i = 0; i < number_entries; i++) {
3434 			strreset->strreset_stream_list[i] = ntohs(list[i]);
3435 		}
3436 	}
3437 	SCTP_BUF_LEN(m_notify) = len;
3438 	SCTP_BUF_NEXT(m_notify) = NULL;
3439 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3440 		/* no space */
3441 		sctp_m_freem(m_notify);
3442 		return;
3443 	}
3444 	/* append to socket */
3445 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3446 	    0, 0, stcb->asoc.context, 0, 0, 0,
3447 	    m_notify);
3448 	if (control == NULL) {
3449 		/* no memory */
3450 		sctp_m_freem(m_notify);
3451 		return;
3452 	}
3453 	control->spec_flags = M_NOTIFICATION;
3454 	control->length = SCTP_BUF_LEN(m_notify);
3455 	/* not that we need this */
3456 	control->tail_mbuf = m_notify;
3457 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3458 	    control,
3459 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3460 }
3461 
3462 
3463 static void
3464 sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct sctp_error_chunk *chunk)
3465 {
3466 	struct mbuf *m_notify;
3467 	struct sctp_remote_error *sre;
3468 	struct sctp_queued_to_read *control;
3469 	size_t notif_len, chunk_len;
3470 
3471 	if ((stcb == NULL) ||
3472 	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) {
3473 		return;
3474 	}
3475 	if (chunk != NULL) {
3476 		chunk_len = htons(chunk->ch.chunk_length);
3477 	} else {
3478 		chunk_len = 0;
3479 	}
3480 	notif_len = sizeof(struct sctp_remote_error) + chunk_len;
3481 	m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
3482 	if (m_notify == NULL) {
3483 		/* Retry with smaller value. */
3484 		notif_len = sizeof(struct sctp_remote_error);
3485 		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_DONTWAIT, 1, MT_DATA);
3486 		if (m_notify == NULL) {
3487 			return;
3488 		}
3489 	}
3490 	SCTP_BUF_NEXT(m_notify) = NULL;
3491 	sre = mtod(m_notify, struct sctp_remote_error *);
3492 	sre->sre_type = SCTP_REMOTE_ERROR;
3493 	sre->sre_flags = 0;
3494 	sre->sre_length = sizeof(struct sctp_remote_error);
3495 	sre->sre_error = error;
3496 	sre->sre_assoc_id = sctp_get_associd(stcb);
3497 	if (notif_len > sizeof(struct sctp_remote_error)) {
3498 		memcpy(sre->sre_data, chunk, chunk_len);
3499 		sre->sre_length += chunk_len;
3500 	}
3501 	SCTP_BUF_LEN(m_notify) = sre->sre_length;
3502 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3503 	    0, 0, stcb->asoc.context, 0, 0, 0,
3504 	    m_notify);
3505 	if (control != NULL) {
3506 		control->length = SCTP_BUF_LEN(m_notify);
3507 		/* not that we need this */
3508 		control->tail_mbuf = m_notify;
3509 		control->spec_flags = M_NOTIFICATION;
3510 		sctp_add_to_readq(stcb->sctp_ep, stcb,
3511 		    control,
3512 		    &stcb->sctp_socket->so_rcv, 1,
3513 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3514 	} else {
3515 		sctp_m_freem(m_notify);
3516 	}
3517 }
3518 
3519 
3520 void
3521 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3522     uint32_t error, void *data, int so_locked
3523 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3524     SCTP_UNUSED
3525 #endif
3526 )
3527 {
3528 	if ((stcb == NULL) ||
3529 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3530 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3531 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3532 		/* If the socket is gone we are out of here */
3533 		return;
3534 	}
3535 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3536 		return;
3537 	}
3538 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3539 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3540 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3541 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3542 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3543 			/* Don't report these in front states */
3544 			return;
3545 		}
3546 	}
3547 	switch (notification) {
3548 	case SCTP_NOTIFY_ASSOC_UP:
3549 		if (stcb->asoc.assoc_up_sent == 0) {
3550 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked);
3551 			stcb->asoc.assoc_up_sent = 1;
3552 		}
3553 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3554 			sctp_notify_adaptation_layer(stcb);
3555 		}
3556 		if (stcb->asoc.peer_supports_auth == 0) {
3557 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3558 			    NULL, so_locked);
3559 		}
3560 		break;
3561 	case SCTP_NOTIFY_ASSOC_DOWN:
3562 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked);
3563 		break;
3564 	case SCTP_NOTIFY_INTERFACE_DOWN:
3565 		{
3566 			struct sctp_nets *net;
3567 
3568 			net = (struct sctp_nets *)data;
3569 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3570 			    (struct sockaddr *)&net->ro._l_addr, error);
3571 			break;
3572 		}
3573 	case SCTP_NOTIFY_INTERFACE_UP:
3574 		{
3575 			struct sctp_nets *net;
3576 
3577 			net = (struct sctp_nets *)data;
3578 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3579 			    (struct sockaddr *)&net->ro._l_addr, error);
3580 			break;
3581 		}
3582 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3583 		{
3584 			struct sctp_nets *net;
3585 
3586 			net = (struct sctp_nets *)data;
3587 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3588 			    (struct sockaddr *)&net->ro._l_addr, error);
3589 			break;
3590 		}
3591 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3592 		sctp_notify_send_failed2(stcb, error,
3593 		    (struct sctp_stream_queue_pending *)data, so_locked);
3594 		break;
3595 	case SCTP_NOTIFY_SENT_DG_FAIL:
3596 		sctp_notify_send_failed(stcb, 1, error,
3597 		    (struct sctp_tmit_chunk *)data, so_locked);
3598 		break;
3599 	case SCTP_NOTIFY_UNSENT_DG_FAIL:
3600 		sctp_notify_send_failed(stcb, 0, error,
3601 		    (struct sctp_tmit_chunk *)data, so_locked);
3602 		break;
3603 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3604 		{
3605 			uint32_t val;
3606 
3607 			val = *((uint32_t *) data);
3608 
3609 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3610 			break;
3611 		}
3612 	case SCTP_NOTIFY_ASSOC_LOC_ABORTED:
3613 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3614 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3615 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked);
3616 		} else {
3617 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked);
3618 		}
3619 		break;
3620 	case SCTP_NOTIFY_ASSOC_REM_ABORTED:
3621 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3622 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3623 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked);
3624 		} else {
3625 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked);
3626 		}
3627 		break;
3628 	case SCTP_NOTIFY_ASSOC_RESTART:
3629 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked);
3630 		if (stcb->asoc.peer_supports_auth == 0) {
3631 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3632 			    NULL, so_locked);
3633 		}
3634 		break;
3635 	case SCTP_NOTIFY_STR_RESET_SEND:
3636 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN);
3637 		break;
3638 	case SCTP_NOTIFY_STR_RESET_RECV:
3639 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING);
3640 		break;
3641 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3642 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3643 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED));
3644 		break;
3645 	case SCTP_NOTIFY_STR_RESET_DENIED_OUT:
3646 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3647 		    (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED));
3648 		break;
3649 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3650 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3651 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED));
3652 		break;
3653 	case SCTP_NOTIFY_STR_RESET_DENIED_IN:
3654 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data),
3655 		    (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED));
3656 		break;
3657 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3658 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3659 		    error);
3660 		break;
3661 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3662 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3663 		    error);
3664 		break;
3665 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3666 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3667 		    error);
3668 		break;
3669 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3670 		sctp_notify_shutdown_event(stcb);
3671 		break;
3672 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3673 		sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error,
3674 		    (uint16_t) (uintptr_t) data,
3675 		    so_locked);
3676 		break;
3677 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3678 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3679 		    (uint16_t) (uintptr_t) data,
3680 		    so_locked);
3681 		break;
3682 	case SCTP_NOTIFY_NO_PEER_AUTH:
3683 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3684 		    (uint16_t) (uintptr_t) data,
3685 		    so_locked);
3686 		break;
3687 	case SCTP_NOTIFY_SENDER_DRY:
3688 		sctp_notify_sender_dry_event(stcb, so_locked);
3689 		break;
3690 	case SCTP_NOTIFY_REMOTE_ERROR:
3691 		sctp_notify_remote_error(stcb, error, data);
3692 		break;
3693 	default:
3694 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3695 		    __FUNCTION__, notification, notification);
3696 		break;
3697 	}			/* end switch */
3698 }
3699 
/*
 * Fail every piece of outbound user data still queued on the association:
 * the sent queue, the send queue, and each stream's outqueue.  For every
 * chunk/message a SENT_DG_FAIL / UNSENT_DG_FAIL / SPECIAL_SP_FAIL
 * notification is raised (carrying 'error') before the data is freed.
 * holds_lock indicates the caller already holds the TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		/* nothing to report to: the socket is gone */
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->data != NULL) {
			/* notify first; the notification may steal chk->data */
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
			    error, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk, so_locked);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* sctp_notify_send_failed2 may take sp->data for itself */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    error, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
					sp->tail_mbuf = NULL;
					sp->length = 0;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp, so_locked);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3795 
3796 void
3797 sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error,
3798     struct sctp_abort_chunk *abort, int so_locked
3799 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3800     SCTP_UNUSED
3801 #endif
3802 )
3803 {
3804 	if (stcb == NULL) {
3805 		return;
3806 	}
3807 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3808 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3809 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3810 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3811 	}
3812 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3813 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3814 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3815 		return;
3816 	}
3817 	/* Tell them we lost the asoc */
3818 	sctp_report_all_outbound(stcb, error, 1, so_locked);
3819 	if (from_peer) {
3820 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked);
3821 	} else {
3822 		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked);
3823 	}
3824 }
3825 
/*
 * Abort an association: notify the ULP (when a TCB exists), transmit an
 * ABORT to the peer using the peer's vtag, and free the TCB.  When stcb
 * is NULL only the wire ABORT is sent (out-of-the-blue handling).  The
 * src/dst/sh/m/iphlen arguments describe the packet being responded to.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct mbuf *op_err,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err,
	    use_mflowid, mflowid,
	    vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * sctp_free_assoc() requires the socket lock on these
		 * platforms; juggle the locks while holding a refcount so
		 * the assoc cannot disappear in the unlocked window.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* an established assoc is no longer established */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3874 
#ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Dump the association's inbound and outbound TSN tracking logs.
 * Each log is a circular buffer of SCTP_TSN_LOG_SIZE entries; when the
 * "wrapped" flag is set the oldest entries live from the current index
 * to the end of the array and are printed first, followed by entries
 * 0..index-1.
 *
 * NOTE(review): the body is guarded by NOSIY_PRINTS, which looks like
 * a misspelling of NOISY_PRINTS -- as written, this function compiles
 * to a no-op unless that exact (misspelled) macro is defined.  Confirm
 * intent before renaming it.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		/* inbound log is completely empty */
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* oldest entries: current index through end of buffer */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* newer entries: start of buffer up to current index */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		/* oldest entries: current index through end of buffer */
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		/* newer entries: start of buffer up to current index */
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}

#endif
3938 
/*
 * Abort an existing association: mark it aborted, notify the ULP
 * (unless the socket is already gone), send an ABORT chunk carrying
 * op_err to the peer, and free the association.  With stcb == NULL
 * there is nothing to abort; in that case, if the socket is gone and
 * no associations remain, the pending PCB teardown is completed.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* no assocs left: finish freeing the PCB */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
		sctp_abort_notification(stcb, 0, 0, NULL, so_locked);
	}
	/* notify the peer */
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* an established association is going away */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock order: socket lock before TCB lock.  Drop and re-take the
	 * TCB lock, with a refcount held so the TCB survives the window
	 * in which it is unlocked.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3999 
4000 void
4001 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset,
4002     struct sockaddr *src, struct sockaddr *dst,
4003     struct sctphdr *sh, struct sctp_inpcb *inp,
4004     uint8_t use_mflowid, uint32_t mflowid,
4005     uint32_t vrf_id, uint16_t port)
4006 {
4007 	struct sctp_chunkhdr *ch, chunk_buf;
4008 	unsigned int chk_length;
4009 	int contains_init_chunk;
4010 
4011 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
4012 	/* Generate a TO address for future reference */
4013 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4014 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
4015 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
4016 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
4017 		}
4018 	}
4019 	contains_init_chunk = 0;
4020 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4021 	    sizeof(*ch), (uint8_t *) & chunk_buf);
4022 	while (ch != NULL) {
4023 		chk_length = ntohs(ch->chunk_length);
4024 		if (chk_length < sizeof(*ch)) {
4025 			/* break to abort land */
4026 			break;
4027 		}
4028 		switch (ch->chunk_type) {
4029 		case SCTP_INIT:
4030 			contains_init_chunk = 1;
4031 			break;
4032 		case SCTP_COOKIE_ECHO:
4033 			/* We hit here only if the assoc is being freed */
4034 			return;
4035 		case SCTP_PACKET_DROPPED:
4036 			/* we don't respond to pkt-dropped */
4037 			return;
4038 		case SCTP_ABORT_ASSOCIATION:
4039 			/* we don't respond with an ABORT to an ABORT */
4040 			return;
4041 		case SCTP_SHUTDOWN_COMPLETE:
4042 			/*
4043 			 * we ignore it since we are not waiting for it and
4044 			 * peer is gone
4045 			 */
4046 			return;
4047 		case SCTP_SHUTDOWN_ACK:
4048 			sctp_send_shutdown_complete2(src, dst, sh,
4049 			    use_mflowid, mflowid,
4050 			    vrf_id, port);
4051 			return;
4052 		default:
4053 			break;
4054 		}
4055 		offset += SCTP_SIZE32(chk_length);
4056 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4057 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4058 	}
4059 	if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) ||
4060 	    ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) &&
4061 	    (contains_init_chunk == 0))) {
4062 		sctp_send_abort(m, iphlen, src, dst, sh, 0, NULL,
4063 		    use_mflowid, mflowid,
4064 		    vrf_id, port);
4065 	}
4066 }
4067 
4068 /*
4069  * check the inbound datagram to make sure there is not an abort inside it,
4070  * if there is return 1, else return 0.
4071  */
4072 int
4073 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4074 {
4075 	struct sctp_chunkhdr *ch;
4076 	struct sctp_init_chunk *init_chk, chunk_buf;
4077 	int offset;
4078 	unsigned int chk_length;
4079 
4080 	offset = iphlen + sizeof(struct sctphdr);
4081 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4082 	    (uint8_t *) & chunk_buf);
4083 	while (ch != NULL) {
4084 		chk_length = ntohs(ch->chunk_length);
4085 		if (chk_length < sizeof(*ch)) {
4086 			/* packet is probably corrupt */
4087 			break;
4088 		}
4089 		/* we seem to be ok, is it an abort? */
4090 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4091 			/* yep, tell them */
4092 			return (1);
4093 		}
4094 		if (ch->chunk_type == SCTP_INITIATION) {
4095 			/* need to update the Vtag */
4096 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4097 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4098 			if (init_chk != NULL) {
4099 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4100 			}
4101 		}
4102 		/* Nope, move to the next chunk */
4103 		offset += SCTP_SIZE32(chk_length);
4104 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4105 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4106 	}
4107 	return (0);
4108 }
4109 
4110 /*
4111  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4112  * set (i.e. it's 0) so, create this function to compare link local scopes
4113  */
4114 #ifdef INET6
4115 uint32_t
4116 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4117 {
4118 	struct sockaddr_in6 a, b;
4119 
4120 	/* save copies */
4121 	a = *addr1;
4122 	b = *addr2;
4123 
4124 	if (a.sin6_scope_id == 0)
4125 		if (sa6_recoverscope(&a)) {
4126 			/* can't get scope, so can't match */
4127 			return (0);
4128 		}
4129 	if (b.sin6_scope_id == 0)
4130 		if (sa6_recoverscope(&b)) {
4131 			/* can't get scope, so can't match */
4132 			return (0);
4133 		}
4134 	if (a.sin6_scope_id != b.sin6_scope_id)
4135 		return (0);
4136 
4137 	return (1);
4138 }
4139 
4140 /*
4141  * returns a sockaddr_in6 with embedded scope recovered and removed
4142  */
4143 struct sockaddr_in6 *
4144 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4145 {
4146 	/* check and strip embedded scope junk */
4147 	if (addr->sin6_family == AF_INET6) {
4148 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4149 			if (addr->sin6_scope_id == 0) {
4150 				*store = *addr;
4151 				if (!sa6_recoverscope(store)) {
4152 					/* use the recovered scope */
4153 					addr = store;
4154 				}
4155 			} else {
4156 				/* else, return the original "to" addr */
4157 				in6_clearscope(&addr->sin6_addr);
4158 			}
4159 		}
4160 	}
4161 	return (addr);
4162 }
4163 
4164 #endif
4165 
4166 /*
4167  * are the two addresses the same?  currently a "scopeless" check returns: 1
4168  * if same, 0 if not
4169  */
4170 int
4171 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4172 {
4173 
4174 	/* must be valid */
4175 	if (sa1 == NULL || sa2 == NULL)
4176 		return (0);
4177 
4178 	/* must be the same family */
4179 	if (sa1->sa_family != sa2->sa_family)
4180 		return (0);
4181 
4182 	switch (sa1->sa_family) {
4183 #ifdef INET6
4184 	case AF_INET6:
4185 		{
4186 			/* IPv6 addresses */
4187 			struct sockaddr_in6 *sin6_1, *sin6_2;
4188 
4189 			sin6_1 = (struct sockaddr_in6 *)sa1;
4190 			sin6_2 = (struct sockaddr_in6 *)sa2;
4191 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4192 			    sin6_2));
4193 		}
4194 #endif
4195 #ifdef INET
4196 	case AF_INET:
4197 		{
4198 			/* IPv4 addresses */
4199 			struct sockaddr_in *sin_1, *sin_2;
4200 
4201 			sin_1 = (struct sockaddr_in *)sa1;
4202 			sin_2 = (struct sockaddr_in *)sa2;
4203 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4204 		}
4205 #endif
4206 	default:
4207 		/* we don't do these... */
4208 		return (0);
4209 	}
4210 }
4211 
4212 void
4213 sctp_print_address(struct sockaddr *sa)
4214 {
4215 #ifdef INET6
4216 	char ip6buf[INET6_ADDRSTRLEN];
4217 
4218 	ip6buf[0] = 0;
4219 #endif
4220 
4221 	switch (sa->sa_family) {
4222 #ifdef INET6
4223 	case AF_INET6:
4224 		{
4225 			struct sockaddr_in6 *sin6;
4226 
4227 			sin6 = (struct sockaddr_in6 *)sa;
4228 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4229 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4230 			    ntohs(sin6->sin6_port),
4231 			    sin6->sin6_scope_id);
4232 			break;
4233 		}
4234 #endif
4235 #ifdef INET
4236 	case AF_INET:
4237 		{
4238 			struct sockaddr_in *sin;
4239 			unsigned char *p;
4240 
4241 			sin = (struct sockaddr_in *)sa;
4242 			p = (unsigned char *)&sin->sin_addr;
4243 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4244 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4245 			break;
4246 		}
4247 #endif
4248 	default:
4249 		SCTP_PRINTF("?\n");
4250 		break;
4251 	}
4252 }
4253 
/*
 * Move every queued-to-read control block belonging to stcb from the
 * old endpoint's read queue to the new endpoint's (peeloff/accept
 * path).  Socket-buffer accounting follows the data: the bytes are
 * released from the old socket's receive buffer and charged to the
 * new one.  waitflags is passed to sblock(); if sblock fails the data
 * is deliberately left stranded on the old queue (see comment below).
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* release each mbuf from the old socket's accounting */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge each mbuf to the new socket's accounting */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4329 
/*
 * Append a filled-in control block to the endpoint's read queue,
 * accounting its mbuf chain against sb so select()/poll() see the
 * data, then wake any reader.  Zero-length mbufs are unlinked from the
 * chain before accounting.  If the socket can no longer be read from,
 * the control block and its data are freed instead.  The caller
 * indicates, via inp_read_lock_held and so_locked, which locks it
 * already owns.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* no reader any more: free everything instead of queueing */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		/* only user data (not notifications) counts as a receive */
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* charge this mbuf against the socket buffer */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Lock order: socket before TCB.  Hold a
				 * refcount so the TCB survives the window
				 * where it is unlocked.
				 */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				/* socket may have vanished while unlocked */
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4455 
4456 
/*
 * Append mbuf chain m to an existing control block (partial delivery
 * in progress, or appending on the reassembly queue).  When sb is
 * non-NULL the bytes are also charged to the socket buffer;
 * zero-length mbufs are unlinked first.  "end" marks the message
 * complete; ctls_cumack records the highest pd-api TSN carried by this
 * chain.  Returns 0 on success, -1 when the control block is missing,
 * already complete, or nothing was left to append.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* reader is gone; report success without queueing */
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			/* charge this mbuf against the socket buffer */
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			/*
			 * Lock order: socket before TCB; the refcount keeps
			 * the TCB alive while it is unlocked.
			 */
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			/* socket may have vanished while unlocked */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4606 
4607 
4608 
4609 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4610  *************ALTERNATE ROUTING CODE
4611  */
4612 
4613 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4614  *************ALTERNATE ROUTING CODE
4615  */
4616 
4617 struct mbuf *
4618 sctp_generate_invmanparam(int err)
4619 {
4620 	/* Return a MBUF with a invalid mandatory parameter */
4621 	struct mbuf *m;
4622 
4623 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4624 	if (m) {
4625 		struct sctp_paramhdr *ph;
4626 
4627 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4628 		ph = mtod(m, struct sctp_paramhdr *);
4629 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4630 		ph->param_type = htons(err);
4631 	}
4632 	return (m);
4633 }
4634 
4635 #ifdef SCTP_MBCNT_LOGGING
4636 void
4637 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4638     struct sctp_tmit_chunk *tp1, int chk_cnt)
4639 {
4640 	if (tp1->data == NULL) {
4641 		return;
4642 	}
4643 	asoc->chunks_on_out_queue -= chk_cnt;
4644 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4645 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4646 		    asoc->total_output_queue_size,
4647 		    tp1->book_size,
4648 		    0,
4649 		    tp1->mbcnt);
4650 	}
4651 	if (asoc->total_output_queue_size >= tp1->book_size) {
4652 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4653 	} else {
4654 		asoc->total_output_queue_size = 0;
4655 	}
4656 
4657 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4658 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4659 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4660 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4661 		} else {
4662 			stcb->sctp_socket->so_snd.sb_cc = 0;
4663 
4664 		}
4665 	}
4666 }
4667 
4668 #endif
4669 
4670 int
4671 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4672     uint8_t sent, int so_locked
4673 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4674     SCTP_UNUSED
4675 #endif
4676 )
4677 {
4678 	struct sctp_stream_out *strq;
4679 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4680 	struct sctp_stream_queue_pending *sp;
4681 	uint16_t stream = 0, seq = 0;
4682 	uint8_t foundeom = 0;
4683 	int ret_sz = 0;
4684 	int notdone;
4685 	int do_wakeup_routine = 0;
4686 
4687 	stream = tp1->rec.data.stream_number;
4688 	seq = tp1->rec.data.stream_seq;
4689 	do {
4690 		ret_sz += tp1->book_size;
4691 		if (tp1->data != NULL) {
4692 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4693 				sctp_flight_size_decrease(tp1);
4694 				sctp_total_flight_decrease(stcb, tp1);
4695 			}
4696 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4697 			stcb->asoc.peers_rwnd += tp1->send_size;
4698 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4699 			if (sent) {
4700 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4701 			} else {
4702 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4703 			}
4704 			if (tp1->data) {
4705 				sctp_m_freem(tp1->data);
4706 				tp1->data = NULL;
4707 			}
4708 			do_wakeup_routine = 1;
4709 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4710 				stcb->asoc.sent_queue_cnt_removeable--;
4711 			}
4712 		}
4713 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4714 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4715 		    SCTP_DATA_NOT_FRAG) {
4716 			/* not frag'ed we ae done   */
4717 			notdone = 0;
4718 			foundeom = 1;
4719 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4720 			/* end of frag, we are done */
4721 			notdone = 0;
4722 			foundeom = 1;
4723 		} else {
4724 			/*
4725 			 * Its a begin or middle piece, we must mark all of
4726 			 * it
4727 			 */
4728 			notdone = 1;
4729 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4730 		}
4731 	} while (tp1 && notdone);
4732 	if (foundeom == 0) {
4733 		/*
4734 		 * The multi-part message was scattered across the send and
4735 		 * sent queue.
4736 		 */
4737 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4738 			if ((tp1->rec.data.stream_number != stream) ||
4739 			    (tp1->rec.data.stream_seq != seq)) {
4740 				break;
4741 			}
4742 			/*
4743 			 * save to chk in case we have some on stream out
4744 			 * queue. If so and we have an un-transmitted one we
4745 			 * don't have to fudge the TSN.
4746 			 */
4747 			chk = tp1;
4748 			ret_sz += tp1->book_size;
4749 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4750 			if (sent) {
4751 				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
4752 			} else {
4753 				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
4754 			}
4755 			if (tp1->data) {
4756 				sctp_m_freem(tp1->data);
4757 				tp1->data = NULL;
4758 			}
4759 			/* No flight involved here book the size to 0 */
4760 			tp1->book_size = 0;
4761 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4762 				foundeom = 1;
4763 			}
4764 			do_wakeup_routine = 1;
4765 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4766 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4767 			/*
4768 			 * on to the sent queue so we can wait for it to be
4769 			 * passed by.
4770 			 */
4771 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4772 			    sctp_next);
4773 			stcb->asoc.send_queue_cnt--;
4774 			stcb->asoc.sent_queue_cnt++;
4775 		}
4776 	}
4777 	if (foundeom == 0) {
4778 		/*
4779 		 * Still no eom found. That means there is stuff left on the
4780 		 * stream out queue.. yuck.
4781 		 */
4782 		strq = &stcb->asoc.strmout[stream];
4783 		SCTP_TCB_SEND_LOCK(stcb);
4784 		TAILQ_FOREACH(sp, &strq->outqueue, next) {
4785 			/* FIXME: Shouldn't this be a serial number check? */
4786 			if (sp->strseq > seq) {
4787 				break;
4788 			}
4789 			/* Check if its our SEQ */
4790 			if (sp->strseq == seq) {
4791 				sp->discard_rest = 1;
4792 				/*
4793 				 * We may need to put a chunk on the queue
4794 				 * that holds the TSN that would have been
4795 				 * sent with the LAST bit.
4796 				 */
4797 				if (chk == NULL) {
4798 					/* Yep, we have to */
4799 					sctp_alloc_a_chunk(stcb, chk);
4800 					if (chk == NULL) {
4801 						/*
4802 						 * we are hosed. All we can
4803 						 * do is nothing.. which
4804 						 * will cause an abort if
4805 						 * the peer is paying
4806 						 * attention.
4807 						 */
4808 						goto oh_well;
4809 					}
4810 					memset(chk, 0, sizeof(*chk));
4811 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4812 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4813 					chk->asoc = &stcb->asoc;
4814 					chk->rec.data.stream_seq = sp->strseq;
4815 					chk->rec.data.stream_number = sp->stream;
4816 					chk->rec.data.payloadtype = sp->ppid;
4817 					chk->rec.data.context = sp->context;
4818 					chk->flags = sp->act_flags;
4819 					if (sp->net)
4820 						chk->whoTo = sp->net;
4821 					else
4822 						chk->whoTo = stcb->asoc.primary_destination;
4823 					atomic_add_int(&chk->whoTo->ref_count, 1);
4824 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4825 					stcb->asoc.pr_sctp_cnt++;
4826 					chk->pr_sctp_on = 1;
4827 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4828 					stcb->asoc.sent_queue_cnt++;
4829 					stcb->asoc.pr_sctp_cnt++;
4830 				} else {
4831 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4832 				}
4833 		oh_well:
4834 				if (sp->data) {
4835 					/*
4836 					 * Pull any data to free up the SB
4837 					 * and allow sender to "add more"
4838 					 * while we will throw away :-)
4839 					 */
4840 					sctp_free_spbufspace(stcb, &stcb->asoc,
4841 					    sp);
4842 					ret_sz += sp->length;
4843 					do_wakeup_routine = 1;
4844 					sp->some_taken = 1;
4845 					sctp_m_freem(sp->data);
4846 					sp->data = NULL;
4847 					sp->tail_mbuf = NULL;
4848 					sp->length = 0;
4849 				}
4850 				break;
4851 			}
4852 		}		/* End tailq_foreach */
4853 		SCTP_TCB_SEND_UNLOCK(stcb);
4854 	}
4855 	if (do_wakeup_routine) {
4856 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4857 		struct socket *so;
4858 
4859 		so = SCTP_INP_SO(stcb->sctp_ep);
4860 		if (!so_locked) {
4861 			atomic_add_int(&stcb->asoc.refcnt, 1);
4862 			SCTP_TCB_UNLOCK(stcb);
4863 			SCTP_SOCKET_LOCK(so, 1);
4864 			SCTP_TCB_LOCK(stcb);
4865 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4866 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4867 				/* assoc was freed while we were unlocked */
4868 				SCTP_SOCKET_UNLOCK(so, 1);
4869 				return (ret_sz);
4870 			}
4871 		}
4872 #endif
4873 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4874 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4875 		if (!so_locked) {
4876 			SCTP_SOCKET_UNLOCK(so, 1);
4877 		}
4878 #endif
4879 	}
4880 	return (ret_sz);
4881 }
4882 
/*
 * Checks to see if the given address, sa, is one that is currently known
 * by the kernel.  Note: this can't distinguish the same address on
 * multiple interfaces, and it doesn't handle multiple addresses with
 * different zone/scope id's.  Note: ifa_ifwithaddr() compares the entire
 * sockaddr struct.
 */
4889 struct sctp_ifa *
4890 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4891     int holds_lock)
4892 {
4893 	struct sctp_laddr *laddr;
4894 
4895 	if (holds_lock == 0) {
4896 		SCTP_INP_RLOCK(inp);
4897 	}
4898 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4899 		if (laddr->ifa == NULL)
4900 			continue;
4901 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4902 			continue;
4903 #ifdef INET
4904 		if (addr->sa_family == AF_INET) {
4905 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4906 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4907 				/* found him. */
4908 				if (holds_lock == 0) {
4909 					SCTP_INP_RUNLOCK(inp);
4910 				}
4911 				return (laddr->ifa);
4912 				break;
4913 			}
4914 		}
4915 #endif
4916 #ifdef INET6
4917 		if (addr->sa_family == AF_INET6) {
4918 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4919 			    &laddr->ifa->address.sin6)) {
4920 				/* found him. */
4921 				if (holds_lock == 0) {
4922 					SCTP_INP_RUNLOCK(inp);
4923 				}
4924 				return (laddr->ifa);
4925 				break;
4926 			}
4927 		}
4928 #endif
4929 	}
4930 	if (holds_lock == 0) {
4931 		SCTP_INP_RUNLOCK(inp);
4932 	}
4933 	return (NULL);
4934 }
4935 
/*
 * Compute the hash value used to index the VRF address hash table for
 * the given address.  IPv4 addresses fold the top 16 bits of the
 * address into the bottom 16 with an XOR; IPv6 addresses first sum the
 * four 32-bit words of the address, then fold the same way.  Addresses
 * of any other (or compiled-out) family hash to 0.
 *
 * Bug fix: the IPv6 arm previously read "case INET6:".  INET6 is the
 * kernel option macro (defined as 1 inside this #ifdef), so the case
 * matched sa_family == 1 (AF_LOCAL) rather than AF_INET6, and real
 * IPv6 addresses all fell through to the default and hashed to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			/* Fold the high half-word into the low one. */
			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t hash_of_addr;

			sin6 = (struct sockaddr_in6 *)addr;
			/* Sum all four words, then fold as in the v4 case. */
			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3]);
			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
			return (hash_of_addr);
		}
#endif
	default:
		break;
	}
	return (0);
}
4969 
/*
 * Global address lookup: find the sctp_ifa for addr within the given
 * VRF by walking the VRF's address hash bucket (bucket selected with
 * sctp_get_ifa_hash_val()).  If holds_lock is 0 the global
 * SCTP_IPI_ADDR read lock is taken for the walk and released before
 * returning; otherwise the caller must already hold it.  Returns the
 * matching sctp_ifa, or NULL if the VRF or the address is unknown.
 */
struct sctp_ifa *
sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
{
	struct sctp_ifa *sctp_ifap;
	struct sctp_vrf *vrf;
	struct sctp_ifalist *hash_head;
	uint32_t hash_of_addr;

	if (holds_lock == 0)
		SCTP_IPI_ADDR_RLOCK();

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
		/*
		 * Shared "unlock if we locked, then fail" exit; also the
		 * target of the list-corruption bailout below.
		 */
stage_right:
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();
		return (NULL);
	}
	hash_of_addr = sctp_get_ifa_hash_val(addr);

	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
	/*
	 * Defensive check: hash_head is the address of an array slot and
	 * should never be NULL unless the table itself is corrupt.
	 */
	if (hash_head == NULL) {
		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
		sctp_print_address(addr);
		SCTP_PRINTF("No such bucket for address\n");
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();

		return (NULL);
	}
	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
		/* A NULL element mid-walk means the list is corrupt. */
		if (sctp_ifap == NULL) {
#ifdef INVARIANTS
			panic("Huh LIST_FOREACH corrupt");
			goto stage_right;
#else
			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
			goto stage_right;
#endif
		}
		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    sctp_ifap->address.sin.sin_addr.s_addr) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &sctp_ifap->address.sin6)) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				break;
			}
		}
#endif
	}
	if (holds_lock == 0)
		SCTP_IPI_ADDR_RUNLOCK();
	return (NULL);
}
5043 
/*
 * Called after the application has pulled *freed_so_far bytes off the
 * receive path, to decide whether the peer should be told about the
 * newly opened receive window.  If the window has grown by at least
 * rwnd_req bytes since the last report, a window-update SACK is sent
 * immediately (followed by a chunk-output pass and stopping the RECV
 * timer); otherwise the freed byte count is parked in
 * stcb->freed_by_sorcv_sincelast for a later call.  *freed_so_far is
 * zeroed once accounted for.  hold_rlock says the caller holds the inp
 * read-queue lock; it is dropped around the SACK send and re-acquired
 * before returning.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed while we work. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Also pin the endpoint; dropped at "out". */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed bytes into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		if (hold_rlock) {
			/*
			 * Drop the read-queue lock across the send;
			 * r_unlocked records that it must be re-taken at
			 * "out".
			 */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check now that we hold the TCB lock. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-acquire the read-queue lock if we dropped it above. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5125 
5126 int
5127 sctp_sorecvmsg(struct socket *so,
5128     struct uio *uio,
5129     struct mbuf **mp,
5130     struct sockaddr *from,
5131     int fromlen,
5132     int *msg_flags,
5133     struct sctp_sndrcvinfo *sinfo,
5134     int filling_sinfo)
5135 {
5136 	/*
5137 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5138 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5139 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5140 	 * On the way out we may send out any combination of:
5141 	 * MSG_NOTIFICATION MSG_EOR
5142 	 *
5143 	 */
5144 	struct sctp_inpcb *inp = NULL;
5145 	int my_len = 0;
5146 	int cp_len = 0, error = 0;
5147 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5148 	struct mbuf *m = NULL;
5149 	struct sctp_tcb *stcb = NULL;
5150 	int wakeup_read_socket = 0;
5151 	int freecnt_applied = 0;
5152 	int out_flags = 0, in_flags = 0;
5153 	int block_allowed = 1;
5154 	uint32_t freed_so_far = 0;
5155 	uint32_t copied_so_far = 0;
5156 	int in_eeor_mode = 0;
5157 	int no_rcv_needed = 0;
5158 	uint32_t rwnd_req = 0;
5159 	int hold_sblock = 0;
5160 	int hold_rlock = 0;
5161 	int slen = 0;
5162 	uint32_t held_length = 0;
5163 	int sockbuf_lock = 0;
5164 
5165 	if (uio == NULL) {
5166 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5167 		return (EINVAL);
5168 	}
5169 	if (msg_flags) {
5170 		in_flags = *msg_flags;
5171 		if (in_flags & MSG_PEEK)
5172 			SCTP_STAT_INCR(sctps_read_peeks);
5173 	} else {
5174 		in_flags = 0;
5175 	}
5176 	slen = uio->uio_resid;
5177 
5178 	/* Pull in and set up our int flags */
5179 	if (in_flags & MSG_OOB) {
5180 		/* Out of band's NOT supported */
5181 		return (EOPNOTSUPP);
5182 	}
5183 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5184 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5185 		return (EINVAL);
5186 	}
5187 	if ((in_flags & (MSG_DONTWAIT
5188 	    | MSG_NBIO
5189 	    )) ||
5190 	    SCTP_SO_IS_NBIO(so)) {
5191 		block_allowed = 0;
5192 	}
5193 	/* setup the endpoint */
5194 	inp = (struct sctp_inpcb *)so->so_pcb;
5195 	if (inp == NULL) {
5196 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5197 		return (EFAULT);
5198 	}
5199 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5200 	/* Must be at least a MTU's worth */
5201 	if (rwnd_req < SCTP_MIN_RWND)
5202 		rwnd_req = SCTP_MIN_RWND;
5203 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5204 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5205 		sctp_misc_ints(SCTP_SORECV_ENTER,
5206 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5207 	}
5208 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5209 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5210 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5211 	}
5212 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5213 	sockbuf_lock = 1;
5214 	if (error) {
5215 		goto release_unlocked;
5216 	}
5217 restart:
5218 
5219 
5220 restart_nosblocks:
5221 	if (hold_sblock == 0) {
5222 		SOCKBUF_LOCK(&so->so_rcv);
5223 		hold_sblock = 1;
5224 	}
5225 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5226 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5227 		goto out;
5228 	}
5229 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
5230 		if (so->so_error) {
5231 			error = so->so_error;
5232 			if ((in_flags & MSG_PEEK) == 0)
5233 				so->so_error = 0;
5234 			goto out;
5235 		} else {
5236 			if (so->so_rcv.sb_cc == 0) {
5237 				/* indicate EOF */
5238 				error = 0;
5239 				goto out;
5240 			}
5241 		}
5242 	}
5243 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5244 		/* we need to wait for data */
5245 		if ((so->so_rcv.sb_cc == 0) &&
5246 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5247 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5248 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5249 				/*
5250 				 * For active open side clear flags for
5251 				 * re-use passive open is blocked by
5252 				 * connect.
5253 				 */
5254 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5255 					/*
5256 					 * You were aborted, passive side
5257 					 * always hits here
5258 					 */
5259 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5260 					error = ECONNRESET;
5261 				}
5262 				so->so_state &= ~(SS_ISCONNECTING |
5263 				    SS_ISDISCONNECTING |
5264 				    SS_ISCONFIRMING |
5265 				    SS_ISCONNECTED);
5266 				if (error == 0) {
5267 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5268 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5269 						error = ENOTCONN;
5270 					}
5271 				}
5272 				goto out;
5273 			}
5274 		}
5275 		error = sbwait(&so->so_rcv);
5276 		if (error) {
5277 			goto out;
5278 		}
5279 		held_length = 0;
5280 		goto restart_nosblocks;
5281 	} else if (so->so_rcv.sb_cc == 0) {
5282 		if (so->so_error) {
5283 			error = so->so_error;
5284 			if ((in_flags & MSG_PEEK) == 0)
5285 				so->so_error = 0;
5286 		} else {
5287 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5288 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5289 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5290 					/*
5291 					 * For active open side clear flags
5292 					 * for re-use passive open is
5293 					 * blocked by connect.
5294 					 */
5295 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5296 						/*
5297 						 * You were aborted, passive
5298 						 * side always hits here
5299 						 */
5300 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5301 						error = ECONNRESET;
5302 					}
5303 					so->so_state &= ~(SS_ISCONNECTING |
5304 					    SS_ISDISCONNECTING |
5305 					    SS_ISCONFIRMING |
5306 					    SS_ISCONNECTED);
5307 					if (error == 0) {
5308 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5309 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5310 							error = ENOTCONN;
5311 						}
5312 					}
5313 					goto out;
5314 				}
5315 			}
5316 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5317 			error = EWOULDBLOCK;
5318 		}
5319 		goto out;
5320 	}
5321 	if (hold_sblock == 1) {
5322 		SOCKBUF_UNLOCK(&so->so_rcv);
5323 		hold_sblock = 0;
5324 	}
5325 	/* we possibly have data we can read */
5326 	/* sa_ignore FREED_MEMORY */
5327 	control = TAILQ_FIRST(&inp->read_queue);
5328 	if (control == NULL) {
5329 		/*
5330 		 * This could be happening since the appender did the
5331 		 * increment but as not yet did the tailq insert onto the
5332 		 * read_queue
5333 		 */
5334 		if (hold_rlock == 0) {
5335 			SCTP_INP_READ_LOCK(inp);
5336 		}
5337 		control = TAILQ_FIRST(&inp->read_queue);
5338 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5339 #ifdef INVARIANTS
5340 			panic("Huh, its non zero and nothing on control?");
5341 #endif
5342 			so->so_rcv.sb_cc = 0;
5343 		}
5344 		SCTP_INP_READ_UNLOCK(inp);
5345 		hold_rlock = 0;
5346 		goto restart;
5347 	}
5348 	if ((control->length == 0) &&
5349 	    (control->do_not_ref_stcb)) {
5350 		/*
5351 		 * Clean up code for freeing assoc that left behind a
5352 		 * pdapi.. maybe a peer in EEOR that just closed after
5353 		 * sending and never indicated a EOR.
5354 		 */
5355 		if (hold_rlock == 0) {
5356 			hold_rlock = 1;
5357 			SCTP_INP_READ_LOCK(inp);
5358 		}
5359 		control->held_length = 0;
5360 		if (control->data) {
5361 			/* Hmm there is data here .. fix */
5362 			struct mbuf *m_tmp;
5363 			int cnt = 0;
5364 
5365 			m_tmp = control->data;
5366 			while (m_tmp) {
5367 				cnt += SCTP_BUF_LEN(m_tmp);
5368 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5369 					control->tail_mbuf = m_tmp;
5370 					control->end_added = 1;
5371 				}
5372 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5373 			}
5374 			control->length = cnt;
5375 		} else {
5376 			/* remove it */
5377 			TAILQ_REMOVE(&inp->read_queue, control, next);
5378 			/* Add back any hiddend data */
5379 			sctp_free_remote_addr(control->whoFrom);
5380 			sctp_free_a_readq(stcb, control);
5381 		}
5382 		if (hold_rlock) {
5383 			hold_rlock = 0;
5384 			SCTP_INP_READ_UNLOCK(inp);
5385 		}
5386 		goto restart;
5387 	}
5388 	if ((control->length == 0) &&
5389 	    (control->end_added == 1)) {
5390 		/*
5391 		 * Do we also need to check for (control->pdapi_aborted ==
5392 		 * 1)?
5393 		 */
5394 		if (hold_rlock == 0) {
5395 			hold_rlock = 1;
5396 			SCTP_INP_READ_LOCK(inp);
5397 		}
5398 		TAILQ_REMOVE(&inp->read_queue, control, next);
5399 		if (control->data) {
5400 #ifdef INVARIANTS
5401 			panic("control->data not null but control->length == 0");
5402 #else
5403 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5404 			sctp_m_freem(control->data);
5405 			control->data = NULL;
5406 #endif
5407 		}
5408 		if (control->aux_data) {
5409 			sctp_m_free(control->aux_data);
5410 			control->aux_data = NULL;
5411 		}
5412 		sctp_free_remote_addr(control->whoFrom);
5413 		sctp_free_a_readq(stcb, control);
5414 		if (hold_rlock) {
5415 			hold_rlock = 0;
5416 			SCTP_INP_READ_UNLOCK(inp);
5417 		}
5418 		goto restart;
5419 	}
5420 	if (control->length == 0) {
5421 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5422 		    (filling_sinfo)) {
5423 			/* find a more suitable one then this */
5424 			ctl = TAILQ_NEXT(control, next);
5425 			while (ctl) {
5426 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5427 				    (ctl->some_taken ||
5428 				    (ctl->spec_flags & M_NOTIFICATION) ||
5429 				    ((ctl->do_not_ref_stcb == 0) &&
5430 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5431 				    ) {
5432 					/*-
5433 					 * If we have a different TCB next, and there is data
5434 					 * present. If we have already taken some (pdapi), OR we can
5435 					 * ref the tcb and no delivery as started on this stream, we
5436 					 * take it. Note we allow a notification on a different
5437 					 * assoc to be delivered..
5438 					 */
5439 					control = ctl;
5440 					goto found_one;
5441 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5442 					    (ctl->length) &&
5443 					    ((ctl->some_taken) ||
5444 					    ((ctl->do_not_ref_stcb == 0) &&
5445 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5446 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5447 					/*-
5448 					 * If we have the same tcb, and there is data present, and we
5449 					 * have the strm interleave feature present. Then if we have
5450 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5451 					 * not started a delivery for this stream, we can take it.
5452 					 * Note we do NOT allow a notificaiton on the same assoc to
5453 					 * be delivered.
5454 					 */
5455 					control = ctl;
5456 					goto found_one;
5457 				}
5458 				ctl = TAILQ_NEXT(ctl, next);
5459 			}
5460 		}
5461 		/*
5462 		 * if we reach here, not suitable replacement is available
5463 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5464 		 * into the our held count, and its time to sleep again.
5465 		 */
5466 		held_length = so->so_rcv.sb_cc;
5467 		control->held_length = so->so_rcv.sb_cc;
5468 		goto restart;
5469 	}
5470 	/* Clear the held length since there is something to read */
5471 	control->held_length = 0;
5472 	if (hold_rlock) {
5473 		SCTP_INP_READ_UNLOCK(inp);
5474 		hold_rlock = 0;
5475 	}
5476 found_one:
5477 	/*
5478 	 * If we reach here, control has a some data for us to read off.
5479 	 * Note that stcb COULD be NULL.
5480 	 */
5481 	control->some_taken++;
5482 	if (hold_sblock) {
5483 		SOCKBUF_UNLOCK(&so->so_rcv);
5484 		hold_sblock = 0;
5485 	}
5486 	stcb = control->stcb;
5487 	if (stcb) {
5488 		if ((control->do_not_ref_stcb == 0) &&
5489 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5490 			if (freecnt_applied == 0)
5491 				stcb = NULL;
5492 		} else if (control->do_not_ref_stcb == 0) {
5493 			/* you can't free it on me please */
5494 			/*
5495 			 * The lock on the socket buffer protects us so the
5496 			 * free code will stop. But since we used the
5497 			 * socketbuf lock and the sender uses the tcb_lock
5498 			 * to increment, we need to use the atomic add to
5499 			 * the refcnt
5500 			 */
5501 			if (freecnt_applied) {
5502 #ifdef INVARIANTS
5503 				panic("refcnt already incremented");
5504 #else
5505 				SCTP_PRINTF("refcnt already incremented?\n");
5506 #endif
5507 			} else {
5508 				atomic_add_int(&stcb->asoc.refcnt, 1);
5509 				freecnt_applied = 1;
5510 			}
5511 			/*
5512 			 * Setup to remember how much we have not yet told
5513 			 * the peer our rwnd has opened up. Note we grab the
5514 			 * value from the tcb from last time. Note too that
5515 			 * sack sending clears this when a sack is sent,
5516 			 * which is fine. Once we hit the rwnd_req, we then
5517 			 * will go to the sctp_user_rcvd() that will not
5518 			 * lock until it KNOWs it MUST send a WUP-SACK.
5519 			 */
5520 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5521 			stcb->freed_by_sorcv_sincelast = 0;
5522 		}
5523 	}
5524 	if (stcb &&
5525 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5526 	    control->do_not_ref_stcb == 0) {
5527 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5528 	}
5529 	/* First lets get off the sinfo and sockaddr info */
5530 	if ((sinfo) && filling_sinfo) {
5531 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5532 		nxt = TAILQ_NEXT(control, next);
5533 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
5534 		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
5535 			struct sctp_extrcvinfo *s_extra;
5536 
5537 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5538 			if ((nxt) &&
5539 			    (nxt->length)) {
5540 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5541 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5542 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5543 				}
5544 				if (nxt->spec_flags & M_NOTIFICATION) {
5545 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5546 				}
5547 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5548 				s_extra->sreinfo_next_length = nxt->length;
5549 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5550 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5551 				if (nxt->tail_mbuf != NULL) {
5552 					if (nxt->end_added) {
5553 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5554 					}
5555 				}
5556 			} else {
5557 				/*
5558 				 * we explicitly 0 this, since the memcpy
5559 				 * got some other things beyond the older
5560 				 * sinfo_ that is on the control's structure
5561 				 * :-D
5562 				 */
5563 				nxt = NULL;
5564 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5565 				s_extra->sreinfo_next_aid = 0;
5566 				s_extra->sreinfo_next_length = 0;
5567 				s_extra->sreinfo_next_ppid = 0;
5568 				s_extra->sreinfo_next_stream = 0;
5569 			}
5570 		}
5571 		/*
5572 		 * update off the real current cum-ack, if we have an stcb.
5573 		 */
5574 		if ((control->do_not_ref_stcb == 0) && stcb)
5575 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5576 		/*
5577 		 * mask off the high bits, we keep the actual chunk bits in
5578 		 * there.
5579 		 */
5580 		sinfo->sinfo_flags &= 0x00ff;
5581 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5582 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5583 		}
5584 	}
5585 #ifdef SCTP_ASOCLOG_OF_TSNS
5586 	{
5587 		int index, newindex;
5588 		struct sctp_pcbtsn_rlog *entry;
5589 
5590 		do {
5591 			index = inp->readlog_index;
5592 			newindex = index + 1;
5593 			if (newindex >= SCTP_READ_LOG_SIZE) {
5594 				newindex = 0;
5595 			}
5596 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5597 		entry = &inp->readlog[index];
5598 		entry->vtag = control->sinfo_assoc_id;
5599 		entry->strm = control->sinfo_stream;
5600 		entry->seq = control->sinfo_ssn;
5601 		entry->sz = control->length;
5602 		entry->flgs = control->sinfo_flags;
5603 	}
5604 #endif
5605 	if (fromlen && from) {
5606 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len);
5607 		switch (control->whoFrom->ro._l_addr.sa.sa_family) {
5608 #ifdef INET6
5609 		case AF_INET6:
5610 			((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5611 			break;
5612 #endif
5613 #ifdef INET
5614 		case AF_INET:
5615 			((struct sockaddr_in *)from)->sin_port = control->port_from;
5616 			break;
5617 #endif
5618 		default:
5619 			break;
5620 		}
5621 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5622 
5623 #if defined(INET) && defined(INET6)
5624 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5625 		    (from->sa_family == AF_INET) &&
5626 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5627 			struct sockaddr_in *sin;
5628 			struct sockaddr_in6 sin6;
5629 
5630 			sin = (struct sockaddr_in *)from;
5631 			bzero(&sin6, sizeof(sin6));
5632 			sin6.sin6_family = AF_INET6;
5633 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5634 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5635 			bcopy(&sin->sin_addr,
5636 			    &sin6.sin6_addr.s6_addr32[3],
5637 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5638 			sin6.sin6_port = sin->sin_port;
5639 			memcpy(from, &sin6, sizeof(struct sockaddr_in6));
5640 		}
5641 #endif
5642 #ifdef INET6
5643 		{
5644 			struct sockaddr_in6 lsa6, *from6;
5645 
5646 			from6 = (struct sockaddr_in6 *)from;
5647 			sctp_recover_scope_mac(from6, (&lsa6));
5648 		}
5649 #endif
5650 	}
5651 	/* now copy out what data we can */
5652 	if (mp == NULL) {
5653 		/* copy out each mbuf in the chain up to length */
5654 get_more_data:
5655 		m = control->data;
5656 		while (m) {
5657 			/* Move out all we can */
5658 			cp_len = (int)uio->uio_resid;
5659 			my_len = (int)SCTP_BUF_LEN(m);
5660 			if (cp_len > my_len) {
5661 				/* not enough in this buf */
5662 				cp_len = my_len;
5663 			}
5664 			if (hold_rlock) {
5665 				SCTP_INP_READ_UNLOCK(inp);
5666 				hold_rlock = 0;
5667 			}
5668 			if (cp_len > 0)
5669 				error = uiomove(mtod(m, char *), cp_len, uio);
5670 			/* re-read */
5671 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5672 				goto release;
5673 			}
5674 			if ((control->do_not_ref_stcb == 0) && stcb &&
5675 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5676 				no_rcv_needed = 1;
5677 			}
5678 			if (error) {
5679 				/* error we are out of here */
5680 				goto release;
5681 			}
5682 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5683 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5684 			    ((control->end_added == 0) ||
5685 			    (control->end_added &&
5686 			    (TAILQ_NEXT(control, next) == NULL)))
5687 			    ) {
5688 				SCTP_INP_READ_LOCK(inp);
5689 				hold_rlock = 1;
5690 			}
5691 			if (cp_len == SCTP_BUF_LEN(m)) {
5692 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5693 				    (control->end_added)) {
5694 					out_flags |= MSG_EOR;
5695 					if ((control->do_not_ref_stcb == 0) &&
5696 					    (control->stcb != NULL) &&
5697 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5698 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5699 				}
5700 				if (control->spec_flags & M_NOTIFICATION) {
5701 					out_flags |= MSG_NOTIFICATION;
5702 				}
5703 				/* we ate up the mbuf */
5704 				if (in_flags & MSG_PEEK) {
5705 					/* just looking */
5706 					m = SCTP_BUF_NEXT(m);
5707 					copied_so_far += cp_len;
5708 				} else {
5709 					/* dispose of the mbuf */
5710 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5711 						sctp_sblog(&so->so_rcv,
5712 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5713 					}
5714 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5715 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5716 						sctp_sblog(&so->so_rcv,
5717 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5718 					}
5719 					copied_so_far += cp_len;
5720 					freed_so_far += cp_len;
5721 					freed_so_far += MSIZE;
5722 					atomic_subtract_int(&control->length, cp_len);
5723 					control->data = sctp_m_free(m);
5724 					m = control->data;
5725 					/*
5726 					 * been through it all, must hold sb
5727 					 * lock ok to null tail
5728 					 */
5729 					if (control->data == NULL) {
5730 #ifdef INVARIANTS
5731 						if ((control->end_added == 0) ||
5732 						    (TAILQ_NEXT(control, next) == NULL)) {
5733 							/*
5734 							 * If the end is not
5735 							 * added, OR the
5736 							 * next is NOT null
5737 							 * we MUST have the
5738 							 * lock.
5739 							 */
5740 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5741 								panic("Hmm we don't own the lock?");
5742 							}
5743 						}
5744 #endif
5745 						control->tail_mbuf = NULL;
5746 #ifdef INVARIANTS
5747 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5748 							panic("end_added, nothing left and no MSG_EOR");
5749 						}
5750 #endif
5751 					}
5752 				}
5753 			} else {
5754 				/* Do we need to trim the mbuf? */
5755 				if (control->spec_flags & M_NOTIFICATION) {
5756 					out_flags |= MSG_NOTIFICATION;
5757 				}
5758 				if ((in_flags & MSG_PEEK) == 0) {
5759 					SCTP_BUF_RESV_UF(m, cp_len);
5760 					SCTP_BUF_LEN(m) -= cp_len;
5761 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5762 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5763 					}
5764 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5765 					if ((control->do_not_ref_stcb == 0) &&
5766 					    stcb) {
5767 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5768 					}
5769 					copied_so_far += cp_len;
5770 					freed_so_far += cp_len;
5771 					freed_so_far += MSIZE;
5772 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5773 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5774 						    SCTP_LOG_SBRESULT, 0);
5775 					}
5776 					atomic_subtract_int(&control->length, cp_len);
5777 				} else {
5778 					copied_so_far += cp_len;
5779 				}
5780 			}
5781 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5782 				break;
5783 			}
5784 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5785 			    (control->do_not_ref_stcb == 0) &&
5786 			    (freed_so_far >= rwnd_req)) {
5787 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5788 			}
5789 		}		/* end while(m) */
5790 		/*
5791 		 * At this point we have looked at it all and we either have
5792 		 * a MSG_EOR/or read all the user wants... <OR>
5793 		 * control->length == 0.
5794 		 */
5795 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5796 			/* we are done with this control */
5797 			if (control->length == 0) {
5798 				if (control->data) {
5799 #ifdef INVARIANTS
5800 					panic("control->data not null at read eor?");
5801 #else
5802 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5803 					sctp_m_freem(control->data);
5804 					control->data = NULL;
5805 #endif
5806 				}
5807 		done_with_control:
5808 				if (TAILQ_NEXT(control, next) == NULL) {
5809 					/*
5810 					 * If we don't have a next we need a
5811 					 * lock, if there is a next
5812 					 * interrupt is filling ahead of us
5813 					 * and we don't need a lock to
5814 					 * remove this guy (which is the
5815 					 * head of the queue).
5816 					 */
5817 					if (hold_rlock == 0) {
5818 						SCTP_INP_READ_LOCK(inp);
5819 						hold_rlock = 1;
5820 					}
5821 				}
5822 				TAILQ_REMOVE(&inp->read_queue, control, next);
5823 				/* Add back any hiddend data */
5824 				if (control->held_length) {
5825 					held_length = 0;
5826 					control->held_length = 0;
5827 					wakeup_read_socket = 1;
5828 				}
5829 				if (control->aux_data) {
5830 					sctp_m_free(control->aux_data);
5831 					control->aux_data = NULL;
5832 				}
5833 				no_rcv_needed = control->do_not_ref_stcb;
5834 				sctp_free_remote_addr(control->whoFrom);
5835 				control->data = NULL;
5836 				sctp_free_a_readq(stcb, control);
5837 				control = NULL;
5838 				if ((freed_so_far >= rwnd_req) &&
5839 				    (no_rcv_needed == 0))
5840 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5841 
5842 			} else {
5843 				/*
5844 				 * The user did not read all of this
5845 				 * message, turn off the returned MSG_EOR
5846 				 * since we are leaving more behind on the
5847 				 * control to read.
5848 				 */
5849 #ifdef INVARIANTS
5850 				if (control->end_added &&
5851 				    (control->data == NULL) &&
5852 				    (control->tail_mbuf == NULL)) {
5853 					panic("Gak, control->length is corrupt?");
5854 				}
5855 #endif
5856 				no_rcv_needed = control->do_not_ref_stcb;
5857 				out_flags &= ~MSG_EOR;
5858 			}
5859 		}
5860 		if (out_flags & MSG_EOR) {
5861 			goto release;
5862 		}
5863 		if ((uio->uio_resid == 0) ||
5864 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5865 		    ) {
5866 			goto release;
5867 		}
5868 		/*
5869 		 * If I hit here the receiver wants more and this message is
5870 		 * NOT done (pd-api). So two questions. Can we block? if not
5871 		 * we are done. Did the user NOT set MSG_WAITALL?
5872 		 */
5873 		if (block_allowed == 0) {
5874 			goto release;
5875 		}
5876 		/*
5877 		 * We need to wait for more data a few things: - We don't
5878 		 * sbunlock() so we don't get someone else reading. - We
5879 		 * must be sure to account for the case where what is added
5880 		 * is NOT to our control when we wakeup.
5881 		 */
5882 
5883 		/*
5884 		 * Do we need to tell the transport a rwnd update might be
5885 		 * needed before we go to sleep?
5886 		 */
5887 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5888 		    ((freed_so_far >= rwnd_req) &&
5889 		    (control->do_not_ref_stcb == 0) &&
5890 		    (no_rcv_needed == 0))) {
5891 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5892 		}
5893 wait_some_more:
5894 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5895 			goto release;
5896 		}
5897 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5898 			goto release;
5899 
5900 		if (hold_rlock == 1) {
5901 			SCTP_INP_READ_UNLOCK(inp);
5902 			hold_rlock = 0;
5903 		}
5904 		if (hold_sblock == 0) {
5905 			SOCKBUF_LOCK(&so->so_rcv);
5906 			hold_sblock = 1;
5907 		}
5908 		if ((copied_so_far) && (control->length == 0) &&
5909 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5910 			goto release;
5911 		}
5912 		if (so->so_rcv.sb_cc <= control->held_length) {
5913 			error = sbwait(&so->so_rcv);
5914 			if (error) {
5915 				goto release;
5916 			}
5917 			control->held_length = 0;
5918 		}
5919 		if (hold_sblock) {
5920 			SOCKBUF_UNLOCK(&so->so_rcv);
5921 			hold_sblock = 0;
5922 		}
5923 		if (control->length == 0) {
5924 			/* still nothing here */
5925 			if (control->end_added == 1) {
5926 				/* he aborted, or is done i.e.did a shutdown */
5927 				out_flags |= MSG_EOR;
5928 				if (control->pdapi_aborted) {
5929 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5930 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5931 
5932 					out_flags |= MSG_TRUNC;
5933 				} else {
5934 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5935 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5936 				}
5937 				goto done_with_control;
5938 			}
5939 			if (so->so_rcv.sb_cc > held_length) {
5940 				control->held_length = so->so_rcv.sb_cc;
5941 				held_length = 0;
5942 			}
5943 			goto wait_some_more;
5944 		} else if (control->data == NULL) {
5945 			/*
5946 			 * we must re-sync since data is probably being
5947 			 * added
5948 			 */
5949 			SCTP_INP_READ_LOCK(inp);
5950 			if ((control->length > 0) && (control->data == NULL)) {
5951 				/*
5952 				 * big trouble.. we have the lock and its
5953 				 * corrupt?
5954 				 */
5955 #ifdef INVARIANTS
5956 				panic("Impossible data==NULL length !=0");
5957 #endif
5958 				out_flags |= MSG_EOR;
5959 				out_flags |= MSG_TRUNC;
5960 				control->length = 0;
5961 				SCTP_INP_READ_UNLOCK(inp);
5962 				goto done_with_control;
5963 			}
5964 			SCTP_INP_READ_UNLOCK(inp);
5965 			/* We will fall around to get more data */
5966 		}
5967 		goto get_more_data;
5968 	} else {
5969 		/*-
5970 		 * Give caller back the mbuf chain,
5971 		 * store in uio_resid the length
5972 		 */
5973 		wakeup_read_socket = 0;
5974 		if ((control->end_added == 0) ||
5975 		    (TAILQ_NEXT(control, next) == NULL)) {
5976 			/* Need to get rlock */
5977 			if (hold_rlock == 0) {
5978 				SCTP_INP_READ_LOCK(inp);
5979 				hold_rlock = 1;
5980 			}
5981 		}
5982 		if (control->end_added) {
5983 			out_flags |= MSG_EOR;
5984 			if ((control->do_not_ref_stcb == 0) &&
5985 			    (control->stcb != NULL) &&
5986 			    ((control->spec_flags & M_NOTIFICATION) == 0))
5987 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5988 		}
5989 		if (control->spec_flags & M_NOTIFICATION) {
5990 			out_flags |= MSG_NOTIFICATION;
5991 		}
5992 		uio->uio_resid = control->length;
5993 		*mp = control->data;
5994 		m = control->data;
5995 		while (m) {
5996 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5997 				sctp_sblog(&so->so_rcv,
5998 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5999 			}
6000 			sctp_sbfree(control, stcb, &so->so_rcv, m);
6001 			freed_so_far += SCTP_BUF_LEN(m);
6002 			freed_so_far += MSIZE;
6003 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
6004 				sctp_sblog(&so->so_rcv,
6005 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
6006 			}
6007 			m = SCTP_BUF_NEXT(m);
6008 		}
6009 		control->data = control->tail_mbuf = NULL;
6010 		control->length = 0;
6011 		if (out_flags & MSG_EOR) {
6012 			/* Done with this control */
6013 			goto done_with_control;
6014 		}
6015 	}
6016 release:
6017 	if (hold_rlock == 1) {
6018 		SCTP_INP_READ_UNLOCK(inp);
6019 		hold_rlock = 0;
6020 	}
6021 	if (hold_sblock == 1) {
6022 		SOCKBUF_UNLOCK(&so->so_rcv);
6023 		hold_sblock = 0;
6024 	}
6025 	sbunlock(&so->so_rcv);
6026 	sockbuf_lock = 0;
6027 
6028 release_unlocked:
6029 	if (hold_sblock) {
6030 		SOCKBUF_UNLOCK(&so->so_rcv);
6031 		hold_sblock = 0;
6032 	}
6033 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6034 		if ((freed_so_far >= rwnd_req) &&
6035 		    (control && (control->do_not_ref_stcb == 0)) &&
6036 		    (no_rcv_needed == 0))
6037 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6038 	}
6039 out:
6040 	if (msg_flags) {
6041 		*msg_flags = out_flags;
6042 	}
6043 	if (((out_flags & MSG_EOR) == 0) &&
6044 	    ((in_flags & MSG_PEEK) == 0) &&
6045 	    (sinfo) &&
6046 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
6047 	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) {
6048 		struct sctp_extrcvinfo *s_extra;
6049 
6050 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6051 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6052 	}
6053 	if (hold_rlock == 1) {
6054 		SCTP_INP_READ_UNLOCK(inp);
6055 	}
6056 	if (hold_sblock) {
6057 		SOCKBUF_UNLOCK(&so->so_rcv);
6058 	}
6059 	if (sockbuf_lock) {
6060 		sbunlock(&so->so_rcv);
6061 	}
6062 	if (freecnt_applied) {
6063 		/*
6064 		 * The lock on the socket buffer protects us so the free
6065 		 * code will stop. But since we used the socketbuf lock and
6066 		 * the sender uses the tcb_lock to increment, we need to use
6067 		 * the atomic add to the refcnt.
6068 		 */
6069 		if (stcb == NULL) {
6070 #ifdef INVARIANTS
6071 			panic("stcb for refcnt has gone NULL?");
6072 			goto stage_left;
6073 #else
6074 			goto stage_left;
6075 #endif
6076 		}
6077 		atomic_add_int(&stcb->asoc.refcnt, -1);
6078 		/* Save the value back for next time */
6079 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6080 	}
6081 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6082 		if (stcb) {
6083 			sctp_misc_ints(SCTP_SORECV_DONE,
6084 			    freed_so_far,
6085 			    ((uio) ? (slen - uio->uio_resid) : slen),
6086 			    stcb->asoc.my_rwnd,
6087 			    so->so_rcv.sb_cc);
6088 		} else {
6089 			sctp_misc_ints(SCTP_SORECV_DONE,
6090 			    freed_so_far,
6091 			    ((uio) ? (slen - uio->uio_resid) : slen),
6092 			    0,
6093 			    so->so_rcv.sb_cc);
6094 		}
6095 	}
6096 stage_left:
6097 	if (wakeup_read_socket) {
6098 		sctp_sorwakeup(inp, so);
6099 	}
6100 	return (error);
6101 }
6102 
6103 
6104 #ifdef SCTP_MBUF_LOGGING
6105 struct mbuf *
6106 sctp_m_free(struct mbuf *m)
6107 {
6108 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6109 		if (SCTP_BUF_IS_EXTENDED(m)) {
6110 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6111 		}
6112 	}
6113 	return (m_free(m));
6114 }
6115 
6116 void
6117 sctp_m_freem(struct mbuf *mb)
6118 {
6119 	while (mb != NULL)
6120 		mb = sctp_m_free(mb);
6121 }
6122 
6123 #endif
6124 
/*
 * Queue a "set primary address" request for a local address.
 *
 * Looks up the sctp_ifa for 'sa' in the given VRF and posts a work item
 * to the address work queue; the address iterator will then request a
 * peer-set-primary for every association holding the address.
 *
 * Returns 0 on success, EADDRNOTAVAIL if the address is not a known
 * local address, or ENOMEM if the work item cannot be allocated.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now increment the count and initialize the wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* hold a reference on the ifa for the work item; the consumer drops it */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	/* kick the address work-queue timer so the item gets processed */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6171 
6172 
6173 int
6174 sctp_soreceive(struct socket *so,
6175     struct sockaddr **psa,
6176     struct uio *uio,
6177     struct mbuf **mp0,
6178     struct mbuf **controlp,
6179     int *flagsp)
6180 {
6181 	int error, fromlen;
6182 	uint8_t sockbuf[256];
6183 	struct sockaddr *from;
6184 	struct sctp_extrcvinfo sinfo;
6185 	int filling_sinfo = 1;
6186 	struct sctp_inpcb *inp;
6187 
6188 	inp = (struct sctp_inpcb *)so->so_pcb;
6189 	/* pickup the assoc we are reading from */
6190 	if (inp == NULL) {
6191 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6192 		return (EINVAL);
6193 	}
6194 	if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
6195 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
6196 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) ||
6197 	    (controlp == NULL)) {
6198 		/* user does not want the sndrcv ctl */
6199 		filling_sinfo = 0;
6200 	}
6201 	if (psa) {
6202 		from = (struct sockaddr *)sockbuf;
6203 		fromlen = sizeof(sockbuf);
6204 		from->sa_len = 0;
6205 	} else {
6206 		from = NULL;
6207 		fromlen = 0;
6208 	}
6209 
6210 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6211 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6212 	if ((controlp) && (filling_sinfo)) {
6213 		/* copy back the sinfo in a CMSG format */
6214 		if (filling_sinfo)
6215 			*controlp = sctp_build_ctl_nchunk(inp,
6216 			    (struct sctp_sndrcvinfo *)&sinfo);
6217 		else
6218 			*controlp = NULL;
6219 	}
6220 	if (psa) {
6221 		/* copy back the address info */
6222 		if (from && from->sa_len) {
6223 			*psa = sodupsockaddr(from, M_NOWAIT);
6224 		} else {
6225 			*psa = NULL;
6226 		}
6227 	}
6228 	return (error);
6229 }
6230 
6231 
6232 
6233 
6234 
/*
 * Add the remaining addresses of a connectx() address list to an
 * association.  'addr' points at a packed array of 'totaddr' sockaddrs
 * (sockaddr_in and/or sockaddr_in6 back to back).
 *
 * Returns the number of addresses added.  On a bad address or an
 * allocation failure the association is freed, *error is set to
 * EINVAL/ENOBUFS and the routine returns early; *error is 0 on success.
 * NOTE: on the error paths the stcb has been destroyed - the caller
 * must not touch it.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* reject wildcard, broadcast and multicast destinations */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* reject unspecified and multicast destinations */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * Unknown family: skip.  Note that 'incr' keeps its
			 * previous value, so we step past the entry using
			 * the last known address size.
			 */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6310 
6311 struct sctp_tcb *
6312 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6313     int *totaddr, int *num_v4, int *num_v6, int *error,
6314     int limit, int *bad_addr)
6315 {
6316 	struct sockaddr *sa;
6317 	struct sctp_tcb *stcb = NULL;
6318 	size_t incr, at, i;
6319 
6320 	at = incr = 0;
6321 	sa = addr;
6322 
6323 	*error = *num_v6 = *num_v4 = 0;
6324 	/* account and validate addresses */
6325 	for (i = 0; i < (size_t)*totaddr; i++) {
6326 		switch (sa->sa_family) {
6327 #ifdef INET
6328 		case AF_INET:
6329 			(*num_v4) += 1;
6330 			incr = sizeof(struct sockaddr_in);
6331 			if (sa->sa_len != incr) {
6332 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6333 				*error = EINVAL;
6334 				*bad_addr = 1;
6335 				return (NULL);
6336 			}
6337 			break;
6338 #endif
6339 #ifdef INET6
6340 		case AF_INET6:
6341 			{
6342 				struct sockaddr_in6 *sin6;
6343 
6344 				sin6 = (struct sockaddr_in6 *)sa;
6345 				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6346 					/* Must be non-mapped for connectx */
6347 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6348 					*error = EINVAL;
6349 					*bad_addr = 1;
6350 					return (NULL);
6351 				}
6352 				(*num_v6) += 1;
6353 				incr = sizeof(struct sockaddr_in6);
6354 				if (sa->sa_len != incr) {
6355 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6356 					*error = EINVAL;
6357 					*bad_addr = 1;
6358 					return (NULL);
6359 				}
6360 				break;
6361 			}
6362 #endif
6363 		default:
6364 			*totaddr = i;
6365 			/* we are done */
6366 			break;
6367 		}
6368 		if (i == (size_t)*totaddr) {
6369 			break;
6370 		}
6371 		SCTP_INP_INCR_REF(inp);
6372 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6373 		if (stcb != NULL) {
6374 			/* Already have or am bring up an association */
6375 			return (stcb);
6376 		} else {
6377 			SCTP_INP_DECR_REF(inp);
6378 		}
6379 		if ((at + incr) > (size_t)limit) {
6380 			*totaddr = i;
6381 			break;
6382 		}
6383 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6384 	}
6385 	return ((struct sctp_tcb *)NULL);
6386 }
6387 
6388 /*
6389  * sctp_bindx(ADD) for one address.
6390  * assumes all arguments are valid/checked by caller.
6391  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	/* scratch storage in case a v4-mapped v6 address must be unwrapped */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* reject short/odd-sized sockaddrs */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped addrs on a v6-only socket */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* unwrap the v4-mapped address into a plain sockaddr_in */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 addrs on a v6-only socket */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* not yet bound at all: this becomes an ordinary bind */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* free for us: add it via the address management path */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* another endpoint already owns this address/port */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6515 
6516 /*
6517  * sctp_bindx(DELETE) for one address.
6518  * assumes all arguments are valid/checked by caller.
6519  */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	/* scratch storage in case a v4-mapped v6 address must be unwrapped */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* reject short/odd-sized sockaddrs */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 addrs on a v6-only socket */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* unwrap the v4-mapped address into a plain sockaddr_in */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 addrs on a v6-only socket */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6600 
6601 /*
6602  * returns the valid local address count for an assoc, taking into account
6603  * all scoping rules
6604  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/* work out which families the endpoint may use */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			/* dual-stack v6 socket may also use v4 */
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case: only the explicitly bound addresses
		 * can be used, minus any restricted ones.
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6738 
6739 #if defined(SCTP_LOCAL_TRACE_BUF)
6740 
/*
 * Record one entry in the global SCTP trace ring buffer.
 *
 * The slot is reserved lock-free: the CAS loop bumps the shared index
 * (wrapping SCTP_MAX_LOGGING_SIZE -> 1) and the pre-increment value
 * identifies the slot this caller owns; a saved index at/above the
 * maximum maps to slot 0.  'str' is accepted for API symmetry but
 * unused here.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* atomically claim the next ring slot; retry on CAS failure */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		/* wrapped: the claimed slot is the first one */
		saveindex = 0;
	}
	/* fill in the claimed slot (not protected against concurrent readers) */
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6766 
6767 #endif
6768 /* XXX: Remove the #ifdef after tunneling over IPv6 works also on FreeBSD. */
6769 #ifdef INET
6770 /* We will need to add support
6771  * to bind the ports and such here
6772  * so we can do UDP tunneling. In
6773  * the mean-time, we return error
6774  */
6775 #include <netinet/udp.h>
6776 #include <netinet/udp_var.h>
6777 #include <sys/proc.h>
6778 #ifdef INET6
6779 #include <netinet6/sctp6_var.h>
6780 #endif
6781 
/*
 * Input hook for UDP-encapsulated SCTP packets (RFC 6951 style).
 *
 * 'm' holds the IP header (and more), 'off' is the offset of the UDP
 * header within it.  Strips the UDP header out of the chain, then hands
 * the packet to sctp_input_with_port() with the encapsulating UDP source
 * port so replies can be tunneled back.  On any error the mbuf chain is
 * freed.  IPv6 encapsulation is not yet supported here.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
{
	struct ip *iph;
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port (kept in network byte order) */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
	 */
	sp = m_split(m, off, M_DONTWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain: append sp after the IP header part */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	iph = mtod(m, struct ip *);
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		/* account for the removed UDP header */
		iph->ip_len -= sizeof(struct udphdr);
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		/* Not yet supported. */
		goto out;
		break;

#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}
6848 
6849 void
6850 sctp_over_udp_stop(void)
6851 {
6852 	struct socket *sop;
6853 
6854 	/*
6855 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6856 	 * for writting!
6857 	 */
6858 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6859 		/* Nothing to do */
6860 		return;
6861 	}
6862 	sop = SCTP_BASE_INFO(udp_tun_socket);
6863 	soclose(sop);
6864 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6865 }
6866 
/*
 * Bring up the kernel UDP socket used for SCTP-over-UDP tunneling and
 * bind it to the sysctl-configured tunneling port.  The sysctl caller
 * must hold the sctp sysctl info lock for writing.
 *
 * Returns 0 on success, EINVAL if no port is configured, EALREADY if
 * the tunnel socket already exists, or the error from socket creation,
 * hook installation, or bind (in which case the socket is torn down
 * again via sctp_over_udp_stop()).
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up, we can't get the port */
exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_udp_tunneled_packet().
	 */
	return (0);
}
6920 
6921 #endif
6922