xref: /freebsd/sys/netinet/sctputil.c (revision 0f27aaf940f2fa5a6540285537b33115a96161a4)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_bsd_addr.h>
51 
52 
53 #ifndef KTR_SCTP
54 #define KTR_SCTP KTR_SUBSYS
55 #endif
56 
57 extern struct sctp_cc_functions sctp_cc_functions[];
58 extern struct sctp_ss_functions sctp_ss_functions[];
59 
60 void
61 sctp_sblog(struct sockbuf *sb,
62     struct sctp_tcb *stcb, int from, int incr)
63 {
64 	struct sctp_cwnd_log sctp_clog;
65 
66 	sctp_clog.x.sb.stcb = stcb;
67 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
68 	if (stcb)
69 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
70 	else
71 		sctp_clog.x.sb.stcb_sbcc = 0;
72 	sctp_clog.x.sb.incr = incr;
73 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
74 	    SCTP_LOG_EVENT_SB,
75 	    from,
76 	    sctp_clog.x.misc.log1,
77 	    sctp_clog.x.misc.log2,
78 	    sctp_clog.x.misc.log3,
79 	    sctp_clog.x.misc.log4);
80 }
81 
82 void
83 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
84 {
85 	struct sctp_cwnd_log sctp_clog;
86 
87 	sctp_clog.x.close.inp = (void *)inp;
88 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
89 	if (stcb) {
90 		sctp_clog.x.close.stcb = (void *)stcb;
91 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
92 	} else {
93 		sctp_clog.x.close.stcb = 0;
94 		sctp_clog.x.close.state = 0;
95 	}
96 	sctp_clog.x.close.loc = loc;
97 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
98 	    SCTP_LOG_EVENT_CLOSE,
99 	    0,
100 	    sctp_clog.x.misc.log1,
101 	    sctp_clog.x.misc.log2,
102 	    sctp_clog.x.misc.log3,
103 	    sctp_clog.x.misc.log4);
104 }
105 
106 
107 void
108 rto_logging(struct sctp_nets *net, int from)
109 {
110 	struct sctp_cwnd_log sctp_clog;
111 
112 	memset(&sctp_clog, 0, sizeof(sctp_clog));
113 	sctp_clog.x.rto.net = (void *)net;
114 	sctp_clog.x.rto.rtt = net->prev_rtt;
115 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
116 	    SCTP_LOG_EVENT_RTT,
117 	    from,
118 	    sctp_clog.x.misc.log1,
119 	    sctp_clog.x.misc.log2,
120 	    sctp_clog.x.misc.log3,
121 	    sctp_clog.x.misc.log4);
122 
123 }
124 
125 void
126 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
127 {
128 	struct sctp_cwnd_log sctp_clog;
129 
130 	sctp_clog.x.strlog.stcb = stcb;
131 	sctp_clog.x.strlog.n_tsn = tsn;
132 	sctp_clog.x.strlog.n_sseq = sseq;
133 	sctp_clog.x.strlog.e_tsn = 0;
134 	sctp_clog.x.strlog.e_sseq = 0;
135 	sctp_clog.x.strlog.strm = stream;
136 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
137 	    SCTP_LOG_EVENT_STRM,
138 	    from,
139 	    sctp_clog.x.misc.log1,
140 	    sctp_clog.x.misc.log2,
141 	    sctp_clog.x.misc.log3,
142 	    sctp_clog.x.misc.log4);
143 
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 
166 void
167 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
168 {
169 	struct sctp_cwnd_log sctp_clog;
170 
171 	sctp_clog.x.sack.cumack = cumack;
172 	sctp_clog.x.sack.oldcumack = old_cumack;
173 	sctp_clog.x.sack.tsn = tsn;
174 	sctp_clog.x.sack.numGaps = gaps;
175 	sctp_clog.x.sack.numDups = dups;
176 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
177 	    SCTP_LOG_EVENT_SACK,
178 	    from,
179 	    sctp_clog.x.misc.log1,
180 	    sctp_clog.x.misc.log2,
181 	    sctp_clog.x.misc.log3,
182 	    sctp_clog.x.misc.log4);
183 }
184 
185 void
186 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
187 {
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	memset(&sctp_clog, 0, sizeof(sctp_clog));
191 	sctp_clog.x.map.base = map;
192 	sctp_clog.x.map.cum = cum;
193 	sctp_clog.x.map.high = high;
194 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
195 	    SCTP_LOG_EVENT_MAP,
196 	    from,
197 	    sctp_clog.x.misc.log1,
198 	    sctp_clog.x.misc.log2,
199 	    sctp_clog.x.misc.log3,
200 	    sctp_clog.x.misc.log4);
201 }
202 
203 void
204 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
205     int from)
206 {
207 	struct sctp_cwnd_log sctp_clog;
208 
209 	memset(&sctp_clog, 0, sizeof(sctp_clog));
210 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
211 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
212 	sctp_clog.x.fr.tsn = tsn;
213 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
214 	    SCTP_LOG_EVENT_FR,
215 	    from,
216 	    sctp_clog.x.misc.log1,
217 	    sctp_clog.x.misc.log2,
218 	    sctp_clog.x.misc.log3,
219 	    sctp_clog.x.misc.log4);
220 
221 }
222 
223 
224 void
225 sctp_log_mb(struct mbuf *m, int from)
226 {
227 	struct sctp_cwnd_log sctp_clog;
228 
229 	sctp_clog.x.mb.mp = m;
230 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
231 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
232 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
233 	if (SCTP_BUF_IS_EXTENDED(m)) {
234 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
236 	} else {
237 		sctp_clog.x.mb.ext = 0;
238 		sctp_clog.x.mb.refcnt = 0;
239 	}
240 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
241 	    SCTP_LOG_EVENT_MBUF,
242 	    from,
243 	    sctp_clog.x.misc.log1,
244 	    sctp_clog.x.misc.log2,
245 	    sctp_clog.x.misc.log3,
246 	    sctp_clog.x.misc.log4);
247 }
248 
249 
250 void
251 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
252     int from)
253 {
254 	struct sctp_cwnd_log sctp_clog;
255 
256 	if (control == NULL) {
257 		SCTP_PRINTF("Gak log of NULL?\n");
258 		return;
259 	}
260 	sctp_clog.x.strlog.stcb = control->stcb;
261 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
262 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
263 	sctp_clog.x.strlog.strm = control->sinfo_stream;
264 	if (poschk != NULL) {
265 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
266 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
267 	} else {
268 		sctp_clog.x.strlog.e_tsn = 0;
269 		sctp_clog.x.strlog.e_sseq = 0;
270 	}
271 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
272 	    SCTP_LOG_EVENT_STRM,
273 	    from,
274 	    sctp_clog.x.misc.log1,
275 	    sctp_clog.x.misc.log2,
276 	    sctp_clog.x.misc.log3,
277 	    sctp_clog.x.misc.log4);
278 
279 }
280 
281 void
282 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
283 {
284 	struct sctp_cwnd_log sctp_clog;
285 
286 	sctp_clog.x.cwnd.net = net;
287 	if (stcb->asoc.send_queue_cnt > 255)
288 		sctp_clog.x.cwnd.cnt_in_send = 255;
289 	else
290 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
291 	if (stcb->asoc.stream_queue_cnt > 255)
292 		sctp_clog.x.cwnd.cnt_in_str = 255;
293 	else
294 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
295 
296 	if (net) {
297 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
298 		sctp_clog.x.cwnd.inflight = net->flight_size;
299 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
300 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
301 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
302 	}
303 	if (SCTP_CWNDLOG_PRESEND == from) {
304 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
305 	}
306 	sctp_clog.x.cwnd.cwnd_augment = augment;
307 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
308 	    SCTP_LOG_EVENT_CWND,
309 	    from,
310 	    sctp_clog.x.misc.log1,
311 	    sctp_clog.x.misc.log2,
312 	    sctp_clog.x.misc.log3,
313 	    sctp_clog.x.misc.log4);
314 
315 }
316 
317 void
318 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
319 {
320 	struct sctp_cwnd_log sctp_clog;
321 
322 	memset(&sctp_clog, 0, sizeof(sctp_clog));
323 	if (inp) {
324 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
325 
326 	} else {
327 		sctp_clog.x.lock.sock = (void *)NULL;
328 	}
329 	sctp_clog.x.lock.inp = (void *)inp;
330 	if (stcb) {
331 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
332 	} else {
333 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
334 	}
335 	if (inp) {
336 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
337 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
338 	} else {
339 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
340 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
341 	}
342 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
343 	if (inp && (inp->sctp_socket)) {
344 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
345 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
346 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
347 	} else {
348 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
349 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
350 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
351 	}
352 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
353 	    SCTP_LOG_LOCK_EVENT,
354 	    from,
355 	    sctp_clog.x.misc.log1,
356 	    sctp_clog.x.misc.log2,
357 	    sctp_clog.x.misc.log3,
358 	    sctp_clog.x.misc.log4);
359 
360 }
361 
362 void
363 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
364 {
365 	struct sctp_cwnd_log sctp_clog;
366 
367 	memset(&sctp_clog, 0, sizeof(sctp_clog));
368 	sctp_clog.x.cwnd.net = net;
369 	sctp_clog.x.cwnd.cwnd_new_value = error;
370 	sctp_clog.x.cwnd.inflight = net->flight_size;
371 	sctp_clog.x.cwnd.cwnd_augment = burst;
372 	if (stcb->asoc.send_queue_cnt > 255)
373 		sctp_clog.x.cwnd.cnt_in_send = 255;
374 	else
375 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
376 	if (stcb->asoc.stream_queue_cnt > 255)
377 		sctp_clog.x.cwnd.cnt_in_str = 255;
378 	else
379 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
380 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
381 	    SCTP_LOG_EVENT_MAXBURST,
382 	    from,
383 	    sctp_clog.x.misc.log1,
384 	    sctp_clog.x.misc.log2,
385 	    sctp_clog.x.misc.log3,
386 	    sctp_clog.x.misc.log4);
387 
388 }
389 
390 void
391 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
392 {
393 	struct sctp_cwnd_log sctp_clog;
394 
395 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
396 	sctp_clog.x.rwnd.send_size = snd_size;
397 	sctp_clog.x.rwnd.overhead = overhead;
398 	sctp_clog.x.rwnd.new_rwnd = 0;
399 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
400 	    SCTP_LOG_EVENT_RWND,
401 	    from,
402 	    sctp_clog.x.misc.log1,
403 	    sctp_clog.x.misc.log2,
404 	    sctp_clog.x.misc.log3,
405 	    sctp_clog.x.misc.log4);
406 }
407 
408 void
409 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
410 {
411 	struct sctp_cwnd_log sctp_clog;
412 
413 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
414 	sctp_clog.x.rwnd.send_size = flight_size;
415 	sctp_clog.x.rwnd.overhead = overhead;
416 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
417 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 	    SCTP_LOG_EVENT_RWND,
419 	    from,
420 	    sctp_clog.x.misc.log1,
421 	    sctp_clog.x.misc.log2,
422 	    sctp_clog.x.misc.log3,
423 	    sctp_clog.x.misc.log4);
424 }
425 
426 void
427 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
428 {
429 	struct sctp_cwnd_log sctp_clog;
430 
431 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
432 	sctp_clog.x.mbcnt.size_change = book;
433 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
434 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
435 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
436 	    SCTP_LOG_EVENT_MBCNT,
437 	    from,
438 	    sctp_clog.x.misc.log1,
439 	    sctp_clog.x.misc.log2,
440 	    sctp_clog.x.misc.log3,
441 	    sctp_clog.x.misc.log4);
442 
443 }
444 
445 void
446 sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
447 {
448 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
449 	    SCTP_LOG_MISC_EVENT,
450 	    from,
451 	    a, b, c, d);
452 }
453 
454 void
455 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
456 {
457 	struct sctp_cwnd_log sctp_clog;
458 
459 	sctp_clog.x.wake.stcb = (void *)stcb;
460 	sctp_clog.x.wake.wake_cnt = wake_cnt;
461 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
462 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
463 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
464 
465 	if (stcb->asoc.stream_queue_cnt < 0xff)
466 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
467 	else
468 		sctp_clog.x.wake.stream_qcnt = 0xff;
469 
470 	if (stcb->asoc.chunks_on_out_queue < 0xff)
471 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
472 	else
473 		sctp_clog.x.wake.chunks_on_oque = 0xff;
474 
475 	sctp_clog.x.wake.sctpflags = 0;
476 	/* set in the defered mode stuff */
477 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
478 		sctp_clog.x.wake.sctpflags |= 1;
479 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
480 		sctp_clog.x.wake.sctpflags |= 2;
481 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
482 		sctp_clog.x.wake.sctpflags |= 4;
483 	/* what about the sb */
484 	if (stcb->sctp_socket) {
485 		struct socket *so = stcb->sctp_socket;
486 
487 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
488 	} else {
489 		sctp_clog.x.wake.sbflags = 0xff;
490 	}
491 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
492 	    SCTP_LOG_EVENT_WAKE,
493 	    from,
494 	    sctp_clog.x.misc.log1,
495 	    sctp_clog.x.misc.log2,
496 	    sctp_clog.x.misc.log3,
497 	    sctp_clog.x.misc.log4);
498 
499 }
500 
501 void
502 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
503 {
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
507 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
508 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
509 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
510 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
511 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
512 	sctp_clog.x.blk.sndlen = sendlen;
513 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
514 	    SCTP_LOG_EVENT_BLOCK,
515 	    from,
516 	    sctp_clog.x.misc.log1,
517 	    sctp_clog.x.misc.log2,
518 	    sctp_clog.x.misc.log3,
519 	    sctp_clog.x.misc.log4);
520 
521 }
522 
/*
 * Copy-out hook for the stat log; with KTR-based logging the records are
 * retrieved via ktrdump instead, so this currently succeeds without
 * touching optval/optsize.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
529 
#ifdef SCTP_AUDITING_ENABLED
/*
 * Ring buffer of two-byte (event, detail) audit records;
 * sctp_audit_indx is the next slot to be written.
 */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;
533 
534 static
535 void
536 sctp_print_audit_report(void)
537 {
538 	int i;
539 	int cnt;
540 
541 	cnt = 0;
542 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
543 		if ((sctp_audit_data[i][0] == 0xe0) &&
544 		    (sctp_audit_data[i][1] == 0x01)) {
545 			cnt = 0;
546 			SCTP_PRINTF("\n");
547 		} else if (sctp_audit_data[i][0] == 0xf0) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
551 		    (sctp_audit_data[i][1] == 0x01)) {
552 			SCTP_PRINTF("\n");
553 			cnt = 0;
554 		}
555 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
556 		    (uint32_t) sctp_audit_data[i][1]);
557 		cnt++;
558 		if ((cnt % 14) == 0)
559 			SCTP_PRINTF("\n");
560 	}
561 	for (i = 0; i < sctp_audit_indx; i++) {
562 		if ((sctp_audit_data[i][0] == 0xe0) &&
563 		    (sctp_audit_data[i][1] == 0x01)) {
564 			cnt = 0;
565 			SCTP_PRINTF("\n");
566 		} else if (sctp_audit_data[i][0] == 0xf0) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
570 		    (sctp_audit_data[i][1] == 0x01)) {
571 			SCTP_PRINTF("\n");
572 			cnt = 0;
573 		}
574 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
575 		    (uint32_t) sctp_audit_data[i][1]);
576 		cnt++;
577 		if ((cnt % 14) == 0)
578 			SCTP_PRINTF("\n");
579 	}
580 	SCTP_PRINTF("\n");
581 }
582 
/*
 * Audit the association's retransmit-count and flight-size bookkeeping
 * against the actual contents of the sent queue. Each check that fails
 * is recorded in the audit ring (0xAF marker), the cached counter is
 * corrected in place, and a full audit report is printed at the end.
 * 'from' identifies the call site; inp/stcb may be NULL, in which case
 * only a "missing" marker is recorded.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* Record the entry event (0xAA) tagged with the caller id. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: no inp to audit */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: no stcb to audit */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1: snapshot of the cached retransmit count (low byte). */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* Recount RESEND-marked and in-flight chunks from the sent queue. */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: cached retransmit count was wrong; fix it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: cached total flight size was wrong; fix it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: cached flight chunk count was wrong; fix it. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: per-net flight sizes must sum to the assoc total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net flight sizes disagree; recompute each. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
712 
713 void
714 sctp_audit_log(uint8_t ev, uint8_t fd)
715 {
716 
717 	sctp_audit_data[sctp_audit_indx][0] = ev;
718 	sctp_audit_data[sctp_audit_indx][1] = fd;
719 	sctp_audit_indx++;
720 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
721 		sctp_audit_indx = 0;
722 	}
723 }
724 
725 #endif
726 
727 /*
728  * sctp_stop_timers_for_shutdown() should be called
729  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
730  * state to make sure that all timers are stopped.
731  */
732 void
733 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
734 {
735 	struct sctp_association *asoc;
736 	struct sctp_nets *net;
737 
738 	asoc = &stcb->asoc;
739 
740 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
741 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
742 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
743 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
744 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
745 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
746 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
747 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
748 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
749 	}
750 }
751 
752 /*
753  * a list of sizes based on typical mtu's, used only if next hop size not
754  * returned.
755  */
/*
 * Must remain sorted in ascending order: sctp_get_prev_mtu() and
 * sctp_get_next_mtu() both do an ordered linear scan over this table.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
776 
777 /*
778  * Return the largest MTU smaller than val. If there is no
779  * entry, just return val.
780  */
781 uint32_t
782 sctp_get_prev_mtu(uint32_t val)
783 {
784 	uint32_t i;
785 
786 	if (val <= sctp_mtu_sizes[0]) {
787 		return (val);
788 	}
789 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
790 		if (val <= sctp_mtu_sizes[i]) {
791 			break;
792 		}
793 	}
794 	return (sctp_mtu_sizes[i - 1]);
795 }
796 
797 /*
798  * Return the smallest MTU larger than val. If there is no
799  * entry, just return val.
800  */
801 uint32_t
802 sctp_get_next_mtu(struct sctp_inpcb *inp, uint32_t val)
803 {
804 	/* select another MTU that is just bigger than this one */
805 	uint32_t i;
806 
807 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
808 		if (val < sctp_mtu_sizes[i]) {
809 			return (sctp_mtu_sizes[i]);
810 		}
811 	}
812 	return (val);
813 }
814 
/*
 * Refill the endpoint's random_store by HMAC'ing the stored random seed
 * with a monotonically increasing counter, and reset store_at to 0.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
833 
/*
 * Return the next 32-bit value from the endpoint's random store,
 * refilling the store when it is exhausted. If initial_sequence_debug
 * is non-zero, return predictable incrementing values instead (debug
 * aid). Lock-free: the cursor is advanced with an atomic compare-and-set
 * retry loop, so concurrent callers get distinct slots.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* -3 keeps the 4-byte read below inside the signature buffer */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	/* Claim [store_at, new_store); retry if another thread raced us. */
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	/*
	 * NOTE(review): type-punning uint8_t* to uint32_t* — relies on the
	 * store being suitably aligned; confirm before reworking.
	 */
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
871 
872 uint32_t
873 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
874 {
875 	uint32_t x, not_done;
876 	struct timeval now;
877 
878 	(void)SCTP_GETTIME_TIMEVAL(&now);
879 	not_done = 1;
880 	while (not_done) {
881 		x = sctp_select_initial_TSN(&inp->sctp_ep);
882 		if (x == 0) {
883 			/* we never use 0 */
884 			continue;
885 		}
886 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
887 			not_done = 0;
888 		}
889 	}
890 	return (x);
891 }
892 
893 int
894 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
895     uint32_t override_tag, uint32_t vrf_id)
896 {
897 	struct sctp_association *asoc;
898 
899 	/*
900 	 * Anything set to zero is taken care of by the allocation routine's
901 	 * bzero
902 	 */
903 
904 	/*
905 	 * Up front select what scoping to apply on addresses I tell my peer
906 	 * Not sure what to do with these right now, we will need to come up
907 	 * with a way to set them. We may need to pass them through from the
908 	 * caller in the sctp_aloc_assoc() function.
909 	 */
910 	int i;
911 
912 	asoc = &stcb->asoc;
913 	/* init all variables to a known value. */
914 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
915 	asoc->max_burst = m->sctp_ep.max_burst;
916 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
917 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
918 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
919 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
920 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
921 	asoc->sctp_frag_point = m->sctp_frag_point;
922 #ifdef INET
923 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
924 #else
925 	asoc->default_tos = 0;
926 #endif
927 
928 #ifdef INET6
929 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
930 #else
931 	asoc->default_flowlabel = 0;
932 #endif
933 	asoc->sb_send_resv = 0;
934 	if (override_tag) {
935 		asoc->my_vtag = override_tag;
936 	} else {
937 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
938 	}
939 	/* Get the nonce tags */
940 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
941 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
942 	asoc->vrf_id = vrf_id;
943 
944 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
945 		asoc->hb_is_disabled = 1;
946 	else
947 		asoc->hb_is_disabled = 0;
948 
949 #ifdef SCTP_ASOCLOG_OF_TSNS
950 	asoc->tsn_in_at = 0;
951 	asoc->tsn_out_at = 0;
952 	asoc->tsn_in_wrapped = 0;
953 	asoc->tsn_out_wrapped = 0;
954 	asoc->cumack_log_at = 0;
955 	asoc->cumack_log_atsnt = 0;
956 #endif
957 #ifdef SCTP_FS_SPEC_LOG
958 	asoc->fs_index = 0;
959 #endif
960 	asoc->refcnt = 0;
961 	asoc->assoc_up_sent = 0;
962 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
963 	    sctp_select_initial_TSN(&m->sctp_ep);
964 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
965 	/* we are optimisitic here */
966 	asoc->peer_supports_pktdrop = 1;
967 	asoc->peer_supports_nat = 0;
968 	asoc->sent_queue_retran_cnt = 0;
969 
970 	/* for CMT */
971 	asoc->last_net_cmt_send_started = NULL;
972 
973 	/* This will need to be adjusted */
974 	asoc->last_acked_seq = asoc->init_seq_number - 1;
975 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
976 	asoc->asconf_seq_in = asoc->last_acked_seq;
977 
978 	/* here we are different, we hold the next one we expect */
979 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
980 
981 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
982 	asoc->initial_rto = m->sctp_ep.initial_rto;
983 
984 	asoc->max_init_times = m->sctp_ep.max_init_times;
985 	asoc->max_send_times = m->sctp_ep.max_send_times;
986 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
987 	asoc->free_chunk_cnt = 0;
988 
989 	asoc->iam_blocking = 0;
990 	/* ECN Nonce initialization */
991 	asoc->context = m->sctp_context;
992 	asoc->def_send = m->def_send;
993 	asoc->ecn_nonce_allowed = 0;
994 	asoc->receiver_nonce_sum = 1;
995 	asoc->nonce_sum_expect_base = 1;
996 	asoc->nonce_sum_check = 1;
997 	asoc->nonce_resync_tsn = 0;
998 	asoc->nonce_wait_for_ecne = 0;
999 	asoc->nonce_wait_tsn = 0;
1000 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1001 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
1002 	asoc->pr_sctp_cnt = 0;
1003 	asoc->total_output_queue_size = 0;
1004 
1005 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1006 		struct in6pcb *inp6;
1007 
1008 		/* Its a V6 socket */
1009 		inp6 = (struct in6pcb *)m;
1010 		asoc->ipv6_addr_legal = 1;
1011 		/* Now look at the binding flag to see if V4 will be legal */
1012 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1013 			asoc->ipv4_addr_legal = 1;
1014 		} else {
1015 			/* V4 addresses are NOT legal on the association */
1016 			asoc->ipv4_addr_legal = 0;
1017 		}
1018 	} else {
1019 		/* Its a V4 socket, no - V6 */
1020 		asoc->ipv4_addr_legal = 1;
1021 		asoc->ipv6_addr_legal = 0;
1022 	}
1023 
1024 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1025 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1026 
1027 	asoc->smallest_mtu = m->sctp_frag_point;
1028 	asoc->minrto = m->sctp_ep.sctp_minrto;
1029 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1030 
1031 	asoc->locked_on_sending = NULL;
1032 	asoc->stream_locked_on = 0;
1033 	asoc->ecn_echo_cnt_onq = 0;
1034 	asoc->stream_locked = 0;
1035 
1036 	asoc->send_sack = 1;
1037 
1038 	LIST_INIT(&asoc->sctp_restricted_addrs);
1039 
1040 	TAILQ_INIT(&asoc->nets);
1041 	TAILQ_INIT(&asoc->pending_reply_queue);
1042 	TAILQ_INIT(&asoc->asconf_ack_sent);
1043 	/* Setup to fill the hb random cache at first HB */
1044 	asoc->hb_random_idx = 4;
1045 
1046 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1047 
1048 	stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
1049 	stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
1050 
1051 	stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
1052 	stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
1053 
1054 	/*
1055 	 * Now the stream parameters, here we allocate space for all streams
1056 	 * that we request by default.
1057 	 */
1058 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1059 	    m->sctp_ep.pre_open_stream_count;
1060 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1061 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1062 	    SCTP_M_STRMO);
1063 	if (asoc->strmout == NULL) {
1064 		/* big trouble no memory */
1065 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1066 		return (ENOMEM);
1067 	}
1068 	for (i = 0; i < asoc->streamoutcnt; i++) {
1069 		/*
1070 		 * inbound side must be set to 0xffff, also NOTE when we get
1071 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1072 		 * count (streamoutcnt) but first check if we sent to any of
1073 		 * the upper streams that were dropped (if some were). Those
1074 		 * that were dropped must be notified to the upper layer as
1075 		 * failed to send.
1076 		 */
1077 		asoc->strmout[i].next_sequence_sent = 0x0;
1078 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1079 		asoc->strmout[i].stream_no = i;
1080 		asoc->strmout[i].last_msg_incomplete = 0;
1081 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i]);
1082 	}
1083 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1084 
1085 	/* Now the mapping array */
1086 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1087 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1088 	    SCTP_M_MAP);
1089 	if (asoc->mapping_array == NULL) {
1090 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1091 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1092 		return (ENOMEM);
1093 	}
1094 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1095 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1096 	    SCTP_M_MAP);
1097 	if (asoc->nr_mapping_array == NULL) {
1098 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1099 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1100 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1101 		return (ENOMEM);
1102 	}
1103 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1104 
1105 	/* Now the init of the other outqueues */
1106 	TAILQ_INIT(&asoc->free_chunks);
1107 	TAILQ_INIT(&asoc->control_send_queue);
1108 	TAILQ_INIT(&asoc->asconf_send_queue);
1109 	TAILQ_INIT(&asoc->send_queue);
1110 	TAILQ_INIT(&asoc->sent_queue);
1111 	TAILQ_INIT(&asoc->reasmqueue);
1112 	TAILQ_INIT(&asoc->resetHead);
1113 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1114 	TAILQ_INIT(&asoc->asconf_queue);
1115 	/* authentication fields */
1116 	asoc->authinfo.random = NULL;
1117 	asoc->authinfo.active_keyid = 0;
1118 	asoc->authinfo.assoc_key = NULL;
1119 	asoc->authinfo.assoc_keyid = 0;
1120 	asoc->authinfo.recv_key = NULL;
1121 	asoc->authinfo.recv_keyid = 0;
1122 	LIST_INIT(&asoc->shared_keys);
1123 	asoc->marked_retrans = 0;
1124 	asoc->timoinit = 0;
1125 	asoc->timodata = 0;
1126 	asoc->timosack = 0;
1127 	asoc->timoshutdown = 0;
1128 	asoc->timoheartbeat = 0;
1129 	asoc->timocookie = 0;
1130 	asoc->timoshutdownack = 0;
1131 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1132 	asoc->discontinuity_time = asoc->start_time;
1133 	/*
1134 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1135 	 * freed later when the association is freed.
1136 	 */
1137 	return (0);
1138 }
1139 
1140 void
1141 sctp_print_mapping_array(struct sctp_association *asoc)
1142 {
1143 	unsigned int i, limit;
1144 
1145 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1146 	    asoc->mapping_array_size,
1147 	    asoc->mapping_array_base_tsn,
1148 	    asoc->cumulative_tsn,
1149 	    asoc->highest_tsn_inside_map,
1150 	    asoc->highest_tsn_inside_nr_map);
1151 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1152 		if (asoc->mapping_array[limit - 1]) {
1153 			break;
1154 		}
1155 	}
1156 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1157 	for (i = 0; i < limit; i++) {
1158 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1159 	}
1160 	if (limit % 16)
1161 		printf("\n");
1162 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1163 		if (asoc->nr_mapping_array[limit - 1]) {
1164 			break;
1165 		}
1166 	}
1167 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1168 	for (i = 0; i < limit; i++) {
1169 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1170 	}
1171 	if (limit % 16)
1172 		printf("\n");
1173 }
1174 
1175 int
1176 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1177 {
1178 	/* mapping array needs to grow */
1179 	uint8_t *new_array1, *new_array2;
1180 	uint32_t new_size;
1181 
1182 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1183 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1184 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1185 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1186 		/* can't get more, forget it */
1187 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1188 		if (new_array1) {
1189 			SCTP_FREE(new_array1, SCTP_M_MAP);
1190 		}
1191 		if (new_array2) {
1192 			SCTP_FREE(new_array2, SCTP_M_MAP);
1193 		}
1194 		return (-1);
1195 	}
1196 	memset(new_array1, 0, new_size);
1197 	memset(new_array2, 0, new_size);
1198 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1199 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1200 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1201 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1202 	asoc->mapping_array = new_array1;
1203 	asoc->nr_mapping_array = new_array2;
1204 	asoc->mapping_array_size = new_size;
1205 	return (0);
1206 }
1207 
1208 
/*
 * Core of the association iterator: walk every endpoint (or a single
 * endpoint when SCTP_ITERATOR_DO_SINGLE_INP is set), and within each
 * endpoint every association in the requested state, invoking the
 * caller-supplied callbacks (function_inp, function_assoc,
 * function_inp_end, function_atend).
 *
 * Lock order maintained throughout: INP-INFO read lock -> iterator lock
 * -> per-endpoint read lock -> per-TCB lock.  The iterator 'it' is freed
 * here when the walk completes (done_with_iterator).
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;	/* it->inp arrives already read-locked on the first pass */
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* Take our own lock, then drop the reference the queuer held for us. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		/* Common exit: unlock, run the completion callback, free 'it'. */
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		first_in = 0;
	} else {
		/* Endpoints after the first are locked here. */
		SCTP_INP_RLOCK(it->inp);
	}
	/* Skip endpoints whose pcb flags/features do not match the filter. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* Lock hand-over: advance first, then unlock the old endpoint. */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		/* Per-endpoint callback runs once per endpoint. */
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Callback asked to skip, or no associations on this endpoint. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Drop every lock (in reverse order) and reacquire,
			 * keeping the endpoint and association alive with
			 * references while unlocked.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				/* Someone asked us to stop while we slept. */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				printf("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Resume: re-lock, drop the temporary references. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1360 
1361 void
1362 sctp_iterator_worker(void)
1363 {
1364 	struct sctp_iterator *it, *nit;
1365 
1366 	/* This function is called with the WQ lock in place */
1367 
1368 	sctp_it_ctl.iterator_running = 1;
1369 	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
1370 		sctp_it_ctl.cur_it = it;
1371 		/* now lets work on this one */
1372 		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
1373 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1374 		CURVNET_SET(it->vn);
1375 		sctp_iterator_work(it);
1376 
1377 		CURVNET_RESTORE();
1378 		SCTP_IPI_ITERATOR_WQ_LOCK();
1379 		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
1380 			sctp_it_ctl.cur_it = NULL;
1381 			break;
1382 		}
1383 		/* sa_ignore FREED_MEMORY */
1384 	}
1385 	sctp_it_ctl.iterator_running = 0;
1386 	return;
1387 }
1388 
1389 
/*
 * Process the global address work queue populated by routing-socket
 * (address add/delete) events: move all queued entries onto a private
 * list and kick off an iterator over every bound-all endpoint to apply
 * them.  If memory is short, re-arm the ADDR_WQ timer and retry later.
 */
static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	/* Allocate before taking the WQ lock. */
	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	/* Transfer every queued address entry onto our private list. */
	SCTP_WQ_ADDR_LOCK();
	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}
	SCTP_WQ_ADDR_UNLOCK();

	if (asc->cnt == 0) {
		/* Queue was empty; nothing to iterate over. */
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		/*
		 * Ownership of 'asc' (and the moved entries) passes to the
		 * iterator; presumably sctp_asconf_iterator_end() releases
		 * them on completion — verify against its definition.
		 */
		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL,	/* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
	}
}
1431 
/*
 * NOTE(review): debug scratch variables with external linkage.  Writing
 * shared globals from a timer callout is racy when timers fire
 * concurrently, and nothing in this file reads them back.  They look like
 * candidates for removal (or at least function-local storage) — confirm no
 * other compilation unit references them before changing linkage.
 */
int retcode = 0;
int cur_oerr = 0;
1434 
1435 void
1436 sctp_timeout_handler(void *t)
1437 {
1438 	struct sctp_inpcb *inp;
1439 	struct sctp_tcb *stcb;
1440 	struct sctp_nets *net;
1441 	struct sctp_timer *tmr;
1442 
1443 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1444 	struct socket *so;
1445 
1446 #endif
1447 	int did_output, type;
1448 
1449 	tmr = (struct sctp_timer *)t;
1450 	inp = (struct sctp_inpcb *)tmr->ep;
1451 	stcb = (struct sctp_tcb *)tmr->tcb;
1452 	net = (struct sctp_nets *)tmr->net;
1453 	CURVNET_SET((struct vnet *)tmr->vnet);
1454 	did_output = 1;
1455 
1456 #ifdef SCTP_AUDITING_ENABLED
1457 	sctp_audit_log(0xF0, (uint8_t) tmr->type);
1458 	sctp_auditing(3, inp, stcb, net);
1459 #endif
1460 
1461 	/* sanity checks... */
1462 	if (tmr->self != (void *)tmr) {
1463 		/*
1464 		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
1465 		 * tmr);
1466 		 */
1467 		CURVNET_RESTORE();
1468 		return;
1469 	}
1470 	tmr->stopped_from = 0xa001;
1471 	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
1472 		/*
1473 		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
1474 		 * tmr->type);
1475 		 */
1476 		CURVNET_RESTORE();
1477 		return;
1478 	}
1479 	tmr->stopped_from = 0xa002;
1480 	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
1481 		CURVNET_RESTORE();
1482 		return;
1483 	}
1484 	/* if this is an iterator timeout, get the struct and clear inp */
1485 	tmr->stopped_from = 0xa003;
1486 	type = tmr->type;
1487 	if (inp) {
1488 		SCTP_INP_INCR_REF(inp);
1489 		if ((inp->sctp_socket == 0) &&
1490 		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
1491 		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
1492 		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
1493 		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
1494 		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
1495 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
1496 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
1497 		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
1498 		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
1499 		    ) {
1500 			SCTP_INP_DECR_REF(inp);
1501 			CURVNET_RESTORE();
1502 			return;
1503 		}
1504 	}
1505 	tmr->stopped_from = 0xa004;
1506 	if (stcb) {
1507 		atomic_add_int(&stcb->asoc.refcnt, 1);
1508 		if (stcb->asoc.state == 0) {
1509 			atomic_add_int(&stcb->asoc.refcnt, -1);
1510 			if (inp) {
1511 				SCTP_INP_DECR_REF(inp);
1512 			}
1513 			CURVNET_RESTORE();
1514 			return;
1515 		}
1516 	}
1517 	tmr->stopped_from = 0xa005;
1518 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
1519 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1520 		if (inp) {
1521 			SCTP_INP_DECR_REF(inp);
1522 		}
1523 		if (stcb) {
1524 			atomic_add_int(&stcb->asoc.refcnt, -1);
1525 		}
1526 		CURVNET_RESTORE();
1527 		return;
1528 	}
1529 	tmr->stopped_from = 0xa006;
1530 
1531 	if (stcb) {
1532 		SCTP_TCB_LOCK(stcb);
1533 		atomic_add_int(&stcb->asoc.refcnt, -1);
1534 		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
1535 		    ((stcb->asoc.state == 0) ||
1536 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
1537 			SCTP_TCB_UNLOCK(stcb);
1538 			if (inp) {
1539 				SCTP_INP_DECR_REF(inp);
1540 			}
1541 			CURVNET_RESTORE();
1542 			return;
1543 		}
1544 	}
1545 	/* record in stopped what t-o occured */
1546 	tmr->stopped_from = tmr->type;
1547 
1548 	/* mark as being serviced now */
1549 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
1550 		/*
1551 		 * Callout has been rescheduled.
1552 		 */
1553 		goto get_out;
1554 	}
1555 	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
1556 		/*
1557 		 * Not active, so no action.
1558 		 */
1559 		goto get_out;
1560 	}
1561 	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);
1562 
1563 	/* call the handler for the appropriate timer type */
1564 	switch (tmr->type) {
1565 	case SCTP_TIMER_TYPE_ZERO_COPY:
1566 		if (inp == NULL) {
1567 			break;
1568 		}
1569 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1570 			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
1571 		}
1572 		break;
1573 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1574 		if (inp == NULL) {
1575 			break;
1576 		}
1577 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
1578 			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
1579 		}
1580 		break;
1581 	case SCTP_TIMER_TYPE_ADDR_WQ:
1582 		sctp_handle_addr_wq();
1583 		break;
1584 	case SCTP_TIMER_TYPE_SEND:
1585 		if ((stcb == NULL) || (inp == NULL)) {
1586 			break;
1587 		}
1588 		SCTP_STAT_INCR(sctps_timodata);
1589 		stcb->asoc.timodata++;
1590 		stcb->asoc.num_send_timers_up--;
1591 		if (stcb->asoc.num_send_timers_up < 0) {
1592 			stcb->asoc.num_send_timers_up = 0;
1593 		}
1594 		SCTP_TCB_LOCK_ASSERT(stcb);
1595 		cur_oerr = stcb->asoc.overall_error_count;
1596 		retcode = sctp_t3rxt_timer(inp, stcb, net);
1597 		if (retcode) {
1598 			/* no need to unlock on tcb its gone */
1599 
1600 			goto out_decr;
1601 		}
1602 		SCTP_TCB_LOCK_ASSERT(stcb);
1603 #ifdef SCTP_AUDITING_ENABLED
1604 		sctp_auditing(4, inp, stcb, net);
1605 #endif
1606 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1607 		if ((stcb->asoc.num_send_timers_up == 0) &&
1608 		    (stcb->asoc.sent_queue_cnt > 0)) {
1609 			struct sctp_tmit_chunk *chk;
1610 
1611 			/*
1612 			 * safeguard. If there on some on the sent queue
1613 			 * somewhere but no timers running something is
1614 			 * wrong... so we start a timer on the first chunk
1615 			 * on the send queue on whatever net it is sent to.
1616 			 */
1617 			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
1618 			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
1619 			    chk->whoTo);
1620 		}
1621 		break;
1622 	case SCTP_TIMER_TYPE_INIT:
1623 		if ((stcb == NULL) || (inp == NULL)) {
1624 			break;
1625 		}
1626 		SCTP_STAT_INCR(sctps_timoinit);
1627 		stcb->asoc.timoinit++;
1628 		if (sctp_t1init_timer(inp, stcb, net)) {
1629 			/* no need to unlock on tcb its gone */
1630 			goto out_decr;
1631 		}
1632 		/* We do output but not here */
1633 		did_output = 0;
1634 		break;
1635 	case SCTP_TIMER_TYPE_RECV:
1636 		if ((stcb == NULL) || (inp == NULL)) {
1637 			break;
1638 		} {
1639 			SCTP_STAT_INCR(sctps_timosack);
1640 			stcb->asoc.timosack++;
1641 			sctp_send_sack(stcb);
1642 		}
1643 #ifdef SCTP_AUDITING_ENABLED
1644 		sctp_auditing(4, inp, stcb, net);
1645 #endif
1646 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
1647 		break;
1648 	case SCTP_TIMER_TYPE_SHUTDOWN:
1649 		if ((stcb == NULL) || (inp == NULL)) {
1650 			break;
1651 		}
1652 		if (sctp_shutdown_timer(inp, stcb, net)) {
1653 			/* no need to unlock on tcb its gone */
1654 			goto out_decr;
1655 		}
1656 		SCTP_STAT_INCR(sctps_timoshutdown);
1657 		stcb->asoc.timoshutdown++;
1658 #ifdef SCTP_AUDITING_ENABLED
1659 		sctp_auditing(4, inp, stcb, net);
1660 #endif
1661 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
1662 		break;
1663 	case SCTP_TIMER_TYPE_HEARTBEAT:
1664 		{
1665 			struct sctp_nets *lnet;
1666 			int cnt_of_unconf = 0;
1667 
1668 			if ((stcb == NULL) || (inp == NULL)) {
1669 				break;
1670 			}
1671 			SCTP_STAT_INCR(sctps_timoheartbeat);
1672 			stcb->asoc.timoheartbeat++;
1673 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1674 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1675 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
1676 					cnt_of_unconf++;
1677 				}
1678 			}
1679 			if (cnt_of_unconf == 0) {
1680 				if (sctp_heartbeat_timer(inp, stcb, lnet,
1681 				    cnt_of_unconf)) {
1682 					/* no need to unlock on tcb its gone */
1683 					goto out_decr;
1684 				}
1685 			}
1686 #ifdef SCTP_AUDITING_ENABLED
1687 			sctp_auditing(4, inp, stcb, lnet);
1688 #endif
1689 			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
1690 			    stcb->sctp_ep, stcb, lnet);
1691 			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
1692 		}
1693 		break;
1694 	case SCTP_TIMER_TYPE_COOKIE:
1695 		if ((stcb == NULL) || (inp == NULL)) {
1696 			break;
1697 		}
1698 		if (sctp_cookie_timer(inp, stcb, net)) {
1699 			/* no need to unlock on tcb its gone */
1700 			goto out_decr;
1701 		}
1702 		SCTP_STAT_INCR(sctps_timocookie);
1703 		stcb->asoc.timocookie++;
1704 #ifdef SCTP_AUDITING_ENABLED
1705 		sctp_auditing(4, inp, stcb, net);
1706 #endif
1707 		/*
1708 		 * We consider T3 and Cookie timer pretty much the same with
1709 		 * respect to where from in chunk_output.
1710 		 */
1711 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
1712 		break;
1713 	case SCTP_TIMER_TYPE_NEWCOOKIE:
1714 		{
1715 			struct timeval tv;
1716 			int i, secret;
1717 
1718 			if (inp == NULL) {
1719 				break;
1720 			}
1721 			SCTP_STAT_INCR(sctps_timosecret);
1722 			(void)SCTP_GETTIME_TIMEVAL(&tv);
1723 			SCTP_INP_WLOCK(inp);
1724 			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
1725 			inp->sctp_ep.last_secret_number =
1726 			    inp->sctp_ep.current_secret_number;
1727 			inp->sctp_ep.current_secret_number++;
1728 			if (inp->sctp_ep.current_secret_number >=
1729 			    SCTP_HOW_MANY_SECRETS) {
1730 				inp->sctp_ep.current_secret_number = 0;
1731 			}
1732 			secret = (int)inp->sctp_ep.current_secret_number;
1733 			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1734 				inp->sctp_ep.secret_key[secret][i] =
1735 				    sctp_select_initial_TSN(&inp->sctp_ep);
1736 			}
1737 			SCTP_INP_WUNLOCK(inp);
1738 			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
1739 		}
1740 		did_output = 0;
1741 		break;
1742 	case SCTP_TIMER_TYPE_PATHMTURAISE:
1743 		if ((stcb == NULL) || (inp == NULL)) {
1744 			break;
1745 		}
1746 		SCTP_STAT_INCR(sctps_timopathmtu);
1747 		sctp_pathmtu_timer(inp, stcb, net);
1748 		did_output = 0;
1749 		break;
1750 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
1751 		if ((stcb == NULL) || (inp == NULL)) {
1752 			break;
1753 		}
1754 		if (sctp_shutdownack_timer(inp, stcb, net)) {
1755 			/* no need to unlock on tcb its gone */
1756 			goto out_decr;
1757 		}
1758 		SCTP_STAT_INCR(sctps_timoshutdownack);
1759 		stcb->asoc.timoshutdownack++;
1760 #ifdef SCTP_AUDITING_ENABLED
1761 		sctp_auditing(4, inp, stcb, net);
1762 #endif
1763 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
1764 		break;
1765 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
1766 		if ((stcb == NULL) || (inp == NULL)) {
1767 			break;
1768 		}
1769 		SCTP_STAT_INCR(sctps_timoshutdownguard);
1770 		sctp_abort_an_association(inp, stcb,
1771 		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
1772 		/* no need to unlock on tcb its gone */
1773 		goto out_decr;
1774 
1775 	case SCTP_TIMER_TYPE_STRRESET:
1776 		if ((stcb == NULL) || (inp == NULL)) {
1777 			break;
1778 		}
1779 		if (sctp_strreset_timer(inp, stcb, net)) {
1780 			/* no need to unlock on tcb its gone */
1781 			goto out_decr;
1782 		}
1783 		SCTP_STAT_INCR(sctps_timostrmrst);
1784 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
1785 		break;
1786 	case SCTP_TIMER_TYPE_EARLYFR:
1787 		/* Need to do FR of things for net */
1788 		if ((stcb == NULL) || (inp == NULL)) {
1789 			break;
1790 		}
1791 		SCTP_STAT_INCR(sctps_timoearlyfr);
1792 		sctp_early_fr_timer(inp, stcb, net);
1793 		break;
1794 	case SCTP_TIMER_TYPE_ASCONF:
1795 		if ((stcb == NULL) || (inp == NULL)) {
1796 			break;
1797 		}
1798 		if (sctp_asconf_timer(inp, stcb, net)) {
1799 			/* no need to unlock on tcb its gone */
1800 			goto out_decr;
1801 		}
1802 		SCTP_STAT_INCR(sctps_timoasconf);
1803 #ifdef SCTP_AUDITING_ENABLED
1804 		sctp_auditing(4, inp, stcb, net);
1805 #endif
1806 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
1807 		break;
1808 	case SCTP_TIMER_TYPE_PRIM_DELETED:
1809 		if ((stcb == NULL) || (inp == NULL)) {
1810 			break;
1811 		}
1812 		sctp_delete_prim_timer(inp, stcb, net);
1813 		SCTP_STAT_INCR(sctps_timodelprim);
1814 		break;
1815 
1816 	case SCTP_TIMER_TYPE_AUTOCLOSE:
1817 		if ((stcb == NULL) || (inp == NULL)) {
1818 			break;
1819 		}
1820 		SCTP_STAT_INCR(sctps_timoautoclose);
1821 		sctp_autoclose_timer(inp, stcb, net);
1822 		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
1823 		did_output = 0;
1824 		break;
1825 	case SCTP_TIMER_TYPE_ASOCKILL:
1826 		if ((stcb == NULL) || (inp == NULL)) {
1827 			break;
1828 		}
1829 		SCTP_STAT_INCR(sctps_timoassockill);
1830 		/* Can we free it yet? */
1831 		SCTP_INP_DECR_REF(inp);
1832 		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
1833 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1834 		so = SCTP_INP_SO(inp);
1835 		atomic_add_int(&stcb->asoc.refcnt, 1);
1836 		SCTP_TCB_UNLOCK(stcb);
1837 		SCTP_SOCKET_LOCK(so, 1);
1838 		SCTP_TCB_LOCK(stcb);
1839 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
1840 #endif
1841 		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
1842 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1843 		SCTP_SOCKET_UNLOCK(so, 1);
1844 #endif
1845 		/*
1846 		 * free asoc, always unlocks (or destroy's) so prevent
1847 		 * duplicate unlock or unlock of a free mtx :-0
1848 		 */
1849 		stcb = NULL;
1850 		goto out_no_decr;
1851 	case SCTP_TIMER_TYPE_INPKILL:
1852 		SCTP_STAT_INCR(sctps_timoinpkill);
1853 		if (inp == NULL) {
1854 			break;
1855 		}
1856 		/*
1857 		 * special case, take away our increment since WE are the
1858 		 * killer
1859 		 */
1860 		SCTP_INP_DECR_REF(inp);
1861 		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
1862 		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
1863 		    SCTP_CALLED_FROM_INPKILL_TIMER);
1864 		inp = NULL;
1865 		goto out_no_decr;
1866 	default:
1867 		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
1868 		    tmr->type);
1869 		break;
1870 	};
1871 #ifdef SCTP_AUDITING_ENABLED
1872 	sctp_audit_log(0xF1, (uint8_t) tmr->type);
1873 	if (inp)
1874 		sctp_auditing(5, inp, stcb, net);
1875 #endif
1876 	if ((did_output) && stcb) {
1877 		/*
1878 		 * Now we need to clean up the control chunk chain if an
1879 		 * ECNE is on it. It must be marked as UNSENT again so next
1880 		 * call will continue to send it until such time that we get
1881 		 * a CWR, to remove it. It is, however, less likely that we
1882 		 * will find a ecn echo on the chain though.
1883 		 */
1884 		sctp_fix_ecn_echo(&stcb->asoc);
1885 	}
1886 get_out:
1887 	if (stcb) {
1888 		SCTP_TCB_UNLOCK(stcb);
1889 	}
1890 out_decr:
1891 	if (inp) {
1892 		SCTP_INP_DECR_REF(inp);
1893 	}
1894 out_no_decr:
1895 	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
1896 	    type);
1897 	CURVNET_RESTORE();
1898 }
1899 
1900 void
1901 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1902     struct sctp_nets *net)
1903 {
1904 	int to_ticks;
1905 	struct sctp_timer *tmr;
1906 
1907 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1908 		return;
1909 
1910 	to_ticks = 0;
1911 
1912 	tmr = NULL;
1913 	if (stcb) {
1914 		SCTP_TCB_LOCK_ASSERT(stcb);
1915 	}
1916 	switch (t_type) {
1917 	case SCTP_TIMER_TYPE_ZERO_COPY:
1918 		tmr = &inp->sctp_ep.zero_copy_timer;
1919 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1920 		break;
1921 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1922 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1923 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1924 		break;
1925 	case SCTP_TIMER_TYPE_ADDR_WQ:
1926 		/* Only 1 tick away :-) */
1927 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1928 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1929 		break;
1930 	case SCTP_TIMER_TYPE_SEND:
1931 		/* Here we use the RTO timer */
1932 		{
1933 			int rto_val;
1934 
1935 			if ((stcb == NULL) || (net == NULL)) {
1936 				return;
1937 			}
1938 			tmr = &net->rxt_timer;
1939 			if (net->RTO == 0) {
1940 				rto_val = stcb->asoc.initial_rto;
1941 			} else {
1942 				rto_val = net->RTO;
1943 			}
1944 			to_ticks = MSEC_TO_TICKS(rto_val);
1945 		}
1946 		break;
1947 	case SCTP_TIMER_TYPE_INIT:
1948 		/*
1949 		 * Here we use the INIT timer default usually about 1
1950 		 * minute.
1951 		 */
1952 		if ((stcb == NULL) || (net == NULL)) {
1953 			return;
1954 		}
1955 		tmr = &net->rxt_timer;
1956 		if (net->RTO == 0) {
1957 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1958 		} else {
1959 			to_ticks = MSEC_TO_TICKS(net->RTO);
1960 		}
1961 		break;
1962 	case SCTP_TIMER_TYPE_RECV:
1963 		/*
1964 		 * Here we use the Delayed-Ack timer value from the inp
1965 		 * ususually about 200ms.
1966 		 */
1967 		if (stcb == NULL) {
1968 			return;
1969 		}
1970 		tmr = &stcb->asoc.dack_timer;
1971 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1972 		break;
1973 	case SCTP_TIMER_TYPE_SHUTDOWN:
1974 		/* Here we use the RTO of the destination. */
1975 		if ((stcb == NULL) || (net == NULL)) {
1976 			return;
1977 		}
1978 		if (net->RTO == 0) {
1979 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1980 		} else {
1981 			to_ticks = MSEC_TO_TICKS(net->RTO);
1982 		}
1983 		tmr = &net->rxt_timer;
1984 		break;
1985 	case SCTP_TIMER_TYPE_HEARTBEAT:
1986 		/*
1987 		 * the net is used here so that we can add in the RTO. Even
1988 		 * though we use a different timer. We also add the HB timer
1989 		 * PLUS a random jitter.
1990 		 */
1991 		if ((inp == NULL) || (stcb == NULL)) {
1992 			return;
1993 		} else {
1994 			uint32_t rndval;
1995 			uint8_t this_random;
1996 			int cnt_of_unconf = 0;
1997 			struct sctp_nets *lnet;
1998 
1999 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2000 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2001 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2002 					cnt_of_unconf++;
2003 				}
2004 			}
2005 			if (cnt_of_unconf) {
2006 				net = lnet = NULL;
2007 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
2008 			}
2009 			if (stcb->asoc.hb_random_idx > 3) {
2010 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2011 				memcpy(stcb->asoc.hb_random_values, &rndval,
2012 				    sizeof(stcb->asoc.hb_random_values));
2013 				stcb->asoc.hb_random_idx = 0;
2014 			}
2015 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
2016 			stcb->asoc.hb_random_idx++;
2017 			stcb->asoc.hb_ect_randombit = 0;
2018 			/*
2019 			 * this_random will be 0 - 256 ms RTO is in ms.
2020 			 */
2021 			if ((stcb->asoc.hb_is_disabled) &&
2022 			    (cnt_of_unconf == 0)) {
2023 				return;
2024 			}
2025 			if (net) {
2026 				int delay;
2027 
2028 				delay = stcb->asoc.heart_beat_delay;
2029 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2030 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2031 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2032 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2033 						delay = 0;
2034 					}
2035 				}
2036 				if (net->RTO == 0) {
2037 					/* Never been checked */
2038 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2039 				} else {
2040 					/* set rto_val to the ms */
2041 					to_ticks = delay + net->RTO + this_random;
2042 				}
2043 			} else {
2044 				if (cnt_of_unconf) {
2045 					to_ticks = this_random + stcb->asoc.initial_rto;
2046 				} else {
2047 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2048 				}
2049 			}
2050 			/*
2051 			 * Now we must convert the to_ticks that are now in
2052 			 * ms to ticks.
2053 			 */
2054 			to_ticks = MSEC_TO_TICKS(to_ticks);
2055 			tmr = &stcb->asoc.hb_timer;
2056 		}
2057 		break;
2058 	case SCTP_TIMER_TYPE_COOKIE:
2059 		/*
2060 		 * Here we can use the RTO timer from the network since one
2061 		 * RTT was compelete. If a retran happened then we will be
2062 		 * using the RTO initial value.
2063 		 */
2064 		if ((stcb == NULL) || (net == NULL)) {
2065 			return;
2066 		}
2067 		if (net->RTO == 0) {
2068 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2069 		} else {
2070 			to_ticks = MSEC_TO_TICKS(net->RTO);
2071 		}
2072 		tmr = &net->rxt_timer;
2073 		break;
2074 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2075 		/*
2076 		 * nothing needed but the endpoint here ususually about 60
2077 		 * minutes.
2078 		 */
2079 		if (inp == NULL) {
2080 			return;
2081 		}
2082 		tmr = &inp->sctp_ep.signature_change;
2083 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2084 		break;
2085 	case SCTP_TIMER_TYPE_ASOCKILL:
2086 		if (stcb == NULL) {
2087 			return;
2088 		}
2089 		tmr = &stcb->asoc.strreset_timer;
2090 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2091 		break;
2092 	case SCTP_TIMER_TYPE_INPKILL:
2093 		/*
2094 		 * The inp is setup to die. We re-use the signature_chage
2095 		 * timer since that has stopped and we are in the GONE
2096 		 * state.
2097 		 */
2098 		if (inp == NULL) {
2099 			return;
2100 		}
2101 		tmr = &inp->sctp_ep.signature_change;
2102 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2103 		break;
2104 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2105 		/*
2106 		 * Here we use the value found in the EP for PMTU ususually
2107 		 * about 10 minutes.
2108 		 */
2109 		if ((stcb == NULL) || (inp == NULL)) {
2110 			return;
2111 		}
2112 		if (net == NULL) {
2113 			return;
2114 		}
2115 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2116 		tmr = &net->pmtu_timer;
2117 		break;
2118 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2119 		/* Here we use the RTO of the destination */
2120 		if ((stcb == NULL) || (net == NULL)) {
2121 			return;
2122 		}
2123 		if (net->RTO == 0) {
2124 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2125 		} else {
2126 			to_ticks = MSEC_TO_TICKS(net->RTO);
2127 		}
2128 		tmr = &net->rxt_timer;
2129 		break;
2130 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2131 		/*
2132 		 * Here we use the endpoints shutdown guard timer usually
2133 		 * about 3 minutes.
2134 		 */
2135 		if ((inp == NULL) || (stcb == NULL)) {
2136 			return;
2137 		}
2138 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2139 		tmr = &stcb->asoc.shut_guard_timer;
2140 		break;
2141 	case SCTP_TIMER_TYPE_STRRESET:
2142 		/*
2143 		 * Here the timer comes from the stcb but its value is from
2144 		 * the net's RTO.
2145 		 */
2146 		if ((stcb == NULL) || (net == NULL)) {
2147 			return;
2148 		}
2149 		if (net->RTO == 0) {
2150 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2151 		} else {
2152 			to_ticks = MSEC_TO_TICKS(net->RTO);
2153 		}
2154 		tmr = &stcb->asoc.strreset_timer;
2155 		break;
2156 
2157 	case SCTP_TIMER_TYPE_EARLYFR:
2158 		{
2159 			unsigned int msec;
2160 
2161 			if ((stcb == NULL) || (net == NULL)) {
2162 				return;
2163 			}
2164 			if (net->flight_size > net->cwnd) {
2165 				/* no need to start */
2166 				return;
2167 			}
2168 			SCTP_STAT_INCR(sctps_earlyfrstart);
2169 			if (net->lastsa == 0) {
2170 				/* Hmm no rtt estimate yet? */
2171 				msec = stcb->asoc.initial_rto >> 2;
2172 			} else {
2173 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2174 			}
2175 			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
2176 				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
2177 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2178 					msec = SCTP_MINFR_MSEC_FLOOR;
2179 				}
2180 			}
2181 			to_ticks = MSEC_TO_TICKS(msec);
2182 			tmr = &net->fr_timer;
2183 		}
2184 		break;
2185 	case SCTP_TIMER_TYPE_ASCONF:
2186 		/*
2187 		 * Here the timer comes from the stcb but its value is from
2188 		 * the net's RTO.
2189 		 */
2190 		if ((stcb == NULL) || (net == NULL)) {
2191 			return;
2192 		}
2193 		if (net->RTO == 0) {
2194 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2195 		} else {
2196 			to_ticks = MSEC_TO_TICKS(net->RTO);
2197 		}
2198 		tmr = &stcb->asoc.asconf_timer;
2199 		break;
2200 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2201 		if ((stcb == NULL) || (net != NULL)) {
2202 			return;
2203 		}
2204 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2205 		tmr = &stcb->asoc.delete_prim_timer;
2206 		break;
2207 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2208 		if (stcb == NULL) {
2209 			return;
2210 		}
2211 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2212 			/*
2213 			 * Really an error since stcb is NOT set to
2214 			 * autoclose
2215 			 */
2216 			return;
2217 		}
2218 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2219 		tmr = &stcb->asoc.autoclose_timer;
2220 		break;
2221 	default:
2222 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2223 		    __FUNCTION__, t_type);
2224 		return;
2225 		break;
2226 	};
2227 	if ((to_ticks <= 0) || (tmr == NULL)) {
2228 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2229 		    __FUNCTION__, t_type, to_ticks, tmr);
2230 		return;
2231 	}
2232 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2233 		/*
2234 		 * we do NOT allow you to have it already running. if it is
2235 		 * we leave the current one up unchanged
2236 		 */
2237 		return;
2238 	}
2239 	/* At this point we can proceed */
2240 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2241 		stcb->asoc.num_send_timers_up++;
2242 	}
2243 	tmr->stopped_from = 0;
2244 	tmr->type = t_type;
2245 	tmr->ep = (void *)inp;
2246 	tmr->tcb = (void *)stcb;
2247 	tmr->net = (void *)net;
2248 	tmr->self = (void *)tmr;
2249 	tmr->vnet = (void *)curvnet;
2250 	tmr->ticks = sctp_get_tick_count();
2251 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2252 	return;
2253 }
2254 
/*
 * Stop (cancel) the SCTP timer of type t_type.  The owning sctp_timer
 * structure is located from inp/stcb/net exactly as sctp_timer_start()
 * locates it, and 'from' is recorded in tmr->stopped_from so debugging
 * can identify which caller stopped the timer.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Every timer type except ADDR_WQ requires an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	/* Map the timer type to the sctp_timer structure that backs it. */
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Stopping a SEND timer decrements the association's count of them. */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/* Invalidate the timer context and cancel the OS callout. */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2418 
2419 uint32_t
2420 sctp_calculate_len(struct mbuf *m)
2421 {
2422 	uint32_t tlen = 0;
2423 	struct mbuf *at;
2424 
2425 	at = m;
2426 	while (at) {
2427 		tlen += SCTP_BUF_LEN(at);
2428 		at = SCTP_BUF_NEXT(at);
2429 	}
2430 	return (tlen);
2431 }
2432 
2433 void
2434 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2435     struct sctp_association *asoc, uint32_t mtu)
2436 {
2437 	/*
2438 	 * Reset the P-MTU size on this association, this involves changing
2439 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2440 	 * allow the DF flag to be cleared.
2441 	 */
2442 	struct sctp_tmit_chunk *chk;
2443 	unsigned int eff_mtu, ovh;
2444 
2445 	asoc->smallest_mtu = mtu;
2446 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2447 		ovh = SCTP_MIN_OVERHEAD;
2448 	} else {
2449 		ovh = SCTP_MIN_V4_OVERHEAD;
2450 	}
2451 	eff_mtu = mtu - ovh;
2452 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2453 		if (chk->send_size > eff_mtu) {
2454 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2455 		}
2456 	}
2457 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2458 		if (chk->send_size > eff_mtu) {
2459 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2460 		}
2461 	}
2462 }
2463 
2464 
/*
 * Given an association and the starting time of the current RTT period,
 * return the RTO in number of msecs.  'net' should point to the current
 * network.
 */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	/*
	 * On strict-alignment platforms the caller's timeval may be
	 * misaligned; 'safe' tells us whether we must work on a local copy.
	 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);

	/*
	 * Record the real time of the last RTT for use in DC-CC.
	 */
	net->last_measured_rtt = now;
	timevalsub(&net->last_measured_rtt, old);

	/* compute the RTT value (now - old) in milliseconds */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*
	 * Van Jacobson's fixed-point estimator: lastsa holds SRTT scaled
	 * by 2^SCTP_RTT_SHIFT, lastsv holds RTTVAR scaled by
	 * 2^SCTP_RTT_VAR_SHIFT.
	 */
	net->rtt = o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR, both in milliseconds */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2601 
/*
 * return a pointer to a contiguous piece of data from the given mbuf chain
 * starting at 'off' for 'len' bytes.  If the desired piece spans more than
 * one mbuf, a copy is made at 'ptr'.  The caller must ensure that the buffer
 * size is >= 'len'.  Returns NULL if there aren't 'len' bytes in the chain.
 */
2608 caddr_t
2609 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2610 {
2611 	uint32_t count;
2612 	uint8_t *ptr;
2613 
2614 	ptr = in_ptr;
2615 	if ((off < 0) || (len <= 0))
2616 		return (NULL);
2617 
2618 	/* find the desired start location */
2619 	while ((m != NULL) && (off > 0)) {
2620 		if (off < SCTP_BUF_LEN(m))
2621 			break;
2622 		off -= SCTP_BUF_LEN(m);
2623 		m = SCTP_BUF_NEXT(m);
2624 	}
2625 	if (m == NULL)
2626 		return (NULL);
2627 
2628 	/* is the current mbuf large enough (eg. contiguous)? */
2629 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2630 		return (mtod(m, caddr_t)+off);
2631 	} else {
2632 		/* else, it spans more than one mbuf, so save a temp copy... */
2633 		while ((m != NULL) && (len > 0)) {
2634 			count = min(SCTP_BUF_LEN(m) - off, len);
2635 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2636 			len -= count;
2637 			ptr += count;
2638 			off = 0;
2639 			m = SCTP_BUF_NEXT(m);
2640 		}
2641 		if ((m == NULL) && (len > 0))
2642 			return (NULL);
2643 		else
2644 			return ((caddr_t)in_ptr);
2645 	}
2646 }
2647 
2648 
2649 
2650 struct sctp_paramhdr *
2651 sctp_get_next_param(struct mbuf *m,
2652     int offset,
2653     struct sctp_paramhdr *pull,
2654     int pull_limit)
2655 {
2656 	/* This just provides a typed signature to Peter's Pull routine */
2657 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2658 	    (uint8_t *) pull));
2659 }
2660 
2661 
2662 int
2663 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2664 {
2665 	/*
2666 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2667 	 * padlen is > 3 this routine will fail.
2668 	 */
2669 	uint8_t *dp;
2670 	int i;
2671 
2672 	if (padlen > 3) {
2673 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2674 		return (ENOBUFS);
2675 	}
2676 	if (padlen <= M_TRAILINGSPACE(m)) {
2677 		/*
2678 		 * The easy way. We hope the majority of the time we hit
2679 		 * here :)
2680 		 */
2681 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2682 		SCTP_BUF_LEN(m) += padlen;
2683 	} else {
2684 		/* Hard way we must grow the mbuf */
2685 		struct mbuf *tmp;
2686 
2687 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2688 		if (tmp == NULL) {
2689 			/* Out of space GAK! we are in big trouble. */
2690 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2691 			return (ENOSPC);
2692 		}
2693 		/* setup and insert in middle */
2694 		SCTP_BUF_LEN(tmp) = padlen;
2695 		SCTP_BUF_NEXT(tmp) = NULL;
2696 		SCTP_BUF_NEXT(m) = tmp;
2697 		dp = mtod(tmp, uint8_t *);
2698 	}
2699 	/* zero out the pad */
2700 	for (i = 0; i < padlen; i++) {
2701 		*dp = 0;
2702 		dp++;
2703 	}
2704 	return (0);
2705 }
2706 
2707 int
2708 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2709 {
2710 	/* find the last mbuf in chain and pad it */
2711 	struct mbuf *m_at;
2712 
2713 	m_at = m;
2714 	if (last_mbuf) {
2715 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2716 	} else {
2717 		while (m_at) {
2718 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2719 				return (sctp_add_pad_tombuf(m_at, padval));
2720 			}
2721 			m_at = SCTP_BUF_NEXT(m_at);
2722 		}
2723 	}
2724 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2725 	return (EFAULT);
2726 }
2727 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification ('event' with 'error') to the
 * application.  For 1-to-1 style (TCP model) sockets, COMM_LOST and
 * CANT_STR_ASSOC are additionally translated into a socket error and any
 * sleepers are woken.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			/* handshake never completed: report as refused */
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Acquire the socket lock if the caller does not hold it;
		 * hold a refcount on the stcb while the tcb lock is
		 * dropped so the association cannot disappear.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* socket closed while we juggled locks */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		socantrcvmore(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	/* Build the sctp_assoc_change notification in an mbuf. */
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	/* Wrap the mbuf in a read-queue entry and queue it to the socket. */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* same lock/refcount dance as above */
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2845 
2846 static void
2847 sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
2848     struct sockaddr *sa, uint32_t error)
2849 {
2850 	struct mbuf *m_notify;
2851 	struct sctp_paddr_change *spc;
2852 	struct sctp_queued_to_read *control;
2853 
2854 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
2855 		/* event not enabled */
2856 		return;
2857 	}
2858 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
2859 	if (m_notify == NULL)
2860 		return;
2861 	SCTP_BUF_LEN(m_notify) = 0;
2862 	spc = mtod(m_notify, struct sctp_paddr_change *);
2863 	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
2864 	spc->spc_flags = 0;
2865 	spc->spc_length = sizeof(struct sctp_paddr_change);
2866 	switch (sa->sa_family) {
2867 	case AF_INET:
2868 		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
2869 		break;
2870 #ifdef INET6
2871 	case AF_INET6:
2872 		{
2873 			struct sockaddr_in6 *sin6;
2874 
2875 			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));
2876 
2877 			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
2878 			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
2879 				if (sin6->sin6_scope_id == 0) {
2880 					/* recover scope_id for user */
2881 					(void)sa6_recoverscope(sin6);
2882 				} else {
2883 					/* clear embedded scope_id for user */
2884 					in6_clearscope(&sin6->sin6_addr);
2885 				}
2886 			}
2887 			break;
2888 		}
2889 #endif
2890 	default:
2891 		/* TSNH */
2892 		break;
2893 	}
2894 	spc->spc_state = state;
2895 	spc->spc_error = error;
2896 	spc->spc_assoc_id = sctp_get_associd(stcb);
2897 
2898 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
2899 	SCTP_BUF_NEXT(m_notify) = NULL;
2900 
2901 	/* append to socket */
2902 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
2903 	    0, 0, 0, 0, 0, 0,
2904 	    m_notify);
2905 	if (control == NULL) {
2906 		/* no memory */
2907 		sctp_m_freem(m_notify);
2908 		return;
2909 	}
2910 	control->length = SCTP_BUF_LEN(m_notify);
2911 	control->spec_flags = M_NOTIFICATION;
2912 	/* not that we need this */
2913 	control->tail_mbuf = m_notify;
2914 	sctp_add_to_readq(stcb->sctp_ep, stcb,
2915 	    control,
2916 	    &stcb->sctp_socket->so_rcv, 1,
2917 	    SCTP_READ_LOCK_NOT_HELD,
2918 	    SCTP_SO_NOT_LOCKED);
2919 }
2920 
2921 
/*
 * Deliver an SCTP_SEND_FAILED notification for data chunk 'chk'.
 * 'error' selects SCTP_DATA_UNSENT (never put on the wire) versus
 * SCTP_DATA_SENT.  The chunk's data mbuf chain is stolen (chk->data is
 * set to NULL) and chained behind the notification header.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* reported length excludes the SCTP data chunk header */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	/*
	 * NOTE(review): freeing m_notify below also frees the stolen data
	 * chain linked above, so the user's payload is dropped along with
	 * the notification when the socket buffer is full.
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3003 
3004 
/*
 * Queue an SCTP_SEND_FAILED notification for a message that was still
 * sitting on a stream output queue (never chunkified) when the
 * association failed.  The pending user data in sp->data is stolen and
 * chained behind the notification header so the application gets the
 * undelivered payload back when it reads the event.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* Length reported to the user: header plus the queued payload. */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* part of this message was already sent as DATA chunks */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* Chain the unsent user data behind the notification header. */
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* dropping the event also frees the stolen data chain */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3077 
3078 
3079 
3080 static void
3081 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3082     uint32_t error)
3083 {
3084 	struct mbuf *m_notify;
3085 	struct sctp_adaptation_event *sai;
3086 	struct sctp_queued_to_read *control;
3087 
3088 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3089 		/* event not enabled */
3090 		return;
3091 	}
3092 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3093 	if (m_notify == NULL)
3094 		/* no space left */
3095 		return;
3096 	SCTP_BUF_LEN(m_notify) = 0;
3097 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3098 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3099 	sai->sai_flags = 0;
3100 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3101 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3102 	sai->sai_assoc_id = sctp_get_associd(stcb);
3103 
3104 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3105 	SCTP_BUF_NEXT(m_notify) = NULL;
3106 
3107 	/* append to socket */
3108 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3109 	    0, 0, 0, 0, 0, 0,
3110 	    m_notify);
3111 	if (control == NULL) {
3112 		/* no memory */
3113 		sctp_m_freem(m_notify);
3114 		return;
3115 	}
3116 	control->length = SCTP_BUF_LEN(m_notify);
3117 	control->spec_flags = M_NOTIFICATION;
3118 	/* not that we need this */
3119 	control->tail_mbuf = m_notify;
3120 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3121 	    control,
3122 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3123 }
3124 
3125 /* This always must be called with the read-queue LOCKED in the INP */
3126 static void
3127 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3128     uint32_t val, int so_locked
3129 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3130     SCTP_UNUSED
3131 #endif
3132 )
3133 {
3134 	struct mbuf *m_notify;
3135 	struct sctp_pdapi_event *pdapi;
3136 	struct sctp_queued_to_read *control;
3137 	struct sockbuf *sb;
3138 
3139 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3140 		/* event not enabled */
3141 		return;
3142 	}
3143 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3144 		return;
3145 	}
3146 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3147 	if (m_notify == NULL)
3148 		/* no space left */
3149 		return;
3150 	SCTP_BUF_LEN(m_notify) = 0;
3151 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3152 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3153 	pdapi->pdapi_flags = 0;
3154 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3155 	pdapi->pdapi_indication = error;
3156 	pdapi->pdapi_stream = (val >> 16);
3157 	pdapi->pdapi_seq = (val & 0x0000ffff);
3158 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3159 
3160 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3161 	SCTP_BUF_NEXT(m_notify) = NULL;
3162 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3163 	    0, 0, 0, 0, 0, 0,
3164 	    m_notify);
3165 	if (control == NULL) {
3166 		/* no memory */
3167 		sctp_m_freem(m_notify);
3168 		return;
3169 	}
3170 	control->spec_flags = M_NOTIFICATION;
3171 	control->length = SCTP_BUF_LEN(m_notify);
3172 	/* not that we need this */
3173 	control->tail_mbuf = m_notify;
3174 	control->held_length = 0;
3175 	control->length = 0;
3176 	sb = &stcb->sctp_socket->so_rcv;
3177 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3178 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3179 	}
3180 	sctp_sballoc(stcb, sb, m_notify);
3181 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3182 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3183 	}
3184 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3185 	control->end_added = 1;
3186 	if (stcb->asoc.control_pdapi)
3187 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3188 	else {
3189 		/* we really should not see this case */
3190 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3191 	}
3192 	if (stcb->sctp_ep && stcb->sctp_socket) {
3193 		/* This should always be the case */
3194 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3195 		struct socket *so;
3196 
3197 		so = SCTP_INP_SO(stcb->sctp_ep);
3198 		if (!so_locked) {
3199 			atomic_add_int(&stcb->asoc.refcnt, 1);
3200 			SCTP_TCB_UNLOCK(stcb);
3201 			SCTP_SOCKET_LOCK(so, 1);
3202 			SCTP_TCB_LOCK(stcb);
3203 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3204 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3205 				SCTP_SOCKET_UNLOCK(so, 1);
3206 				return;
3207 			}
3208 		}
3209 #endif
3210 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3211 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3212 		if (!so_locked) {
3213 			SCTP_SOCKET_UNLOCK(so, 1);
3214 		}
3215 #endif
3216 	}
3217 }
3218 
/*
 * Deliver an SCTP_SHUTDOWN_EVENT notification.  For one-to-one style
 * (TCP model) sockets, and one-to-many sockets in the TCP pool, the
 * send side of the socket is first marked closed so writers see the
 * shutdown.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		/*
		 * Lock order: drop the TCB lock before taking the socket
		 * lock; the refcount keeps the stcb alive while unlocked.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* socket was closed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3286 
3287 static void
3288 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3289     int so_locked
3290 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3291     SCTP_UNUSED
3292 #endif
3293 )
3294 {
3295 	struct mbuf *m_notify;
3296 	struct sctp_sender_dry_event *event;
3297 	struct sctp_queued_to_read *control;
3298 
3299 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3300 		/* event not enabled */
3301 		return;
3302 	}
3303 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3304 	if (m_notify == NULL) {
3305 		/* no space left */
3306 		return;
3307 	}
3308 	SCTP_BUF_LEN(m_notify) = 0;
3309 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3310 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3311 	event->sender_dry_flags = 0;
3312 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3313 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3314 
3315 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3316 	SCTP_BUF_NEXT(m_notify) = NULL;
3317 
3318 	/* append to socket */
3319 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3320 	    0, 0, 0, 0, 0, 0, m_notify);
3321 	if (control == NULL) {
3322 		/* no memory */
3323 		sctp_m_freem(m_notify);
3324 		return;
3325 	}
3326 	control->length = SCTP_BUF_LEN(m_notify);
3327 	control->spec_flags = M_NOTIFICATION;
3328 	/* not that we need this */
3329 	control->tail_mbuf = m_notify;
3330 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3331 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3332 }
3333 
3334 
3335 static void
3336 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3337 {
3338 	struct mbuf *m_notify;
3339 	struct sctp_queued_to_read *control;
3340 	struct sctp_stream_reset_event *strreset;
3341 	int len;
3342 
3343 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3344 		/* event not enabled */
3345 		return;
3346 	}
3347 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3348 	if (m_notify == NULL)
3349 		/* no space left */
3350 		return;
3351 	SCTP_BUF_LEN(m_notify) = 0;
3352 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3353 	if (len > M_TRAILINGSPACE(m_notify)) {
3354 		/* never enough room */
3355 		sctp_m_freem(m_notify);
3356 		return;
3357 	}
3358 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3359 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3360 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3361 	strreset->strreset_length = len;
3362 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3363 	strreset->strreset_list[0] = number_entries;
3364 
3365 	SCTP_BUF_LEN(m_notify) = len;
3366 	SCTP_BUF_NEXT(m_notify) = NULL;
3367 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3368 		/* no space */
3369 		sctp_m_freem(m_notify);
3370 		return;
3371 	}
3372 	/* append to socket */
3373 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3374 	    0, 0, 0, 0, 0, 0,
3375 	    m_notify);
3376 	if (control == NULL) {
3377 		/* no memory */
3378 		sctp_m_freem(m_notify);
3379 		return;
3380 	}
3381 	control->spec_flags = M_NOTIFICATION;
3382 	control->length = SCTP_BUF_LEN(m_notify);
3383 	/* not that we need this */
3384 	control->tail_mbuf = m_notify;
3385 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3386 	    control,
3387 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3388 }
3389 
3390 
3391 static void
3392 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3393     int number_entries, uint16_t * list, int flag)
3394 {
3395 	struct mbuf *m_notify;
3396 	struct sctp_queued_to_read *control;
3397 	struct sctp_stream_reset_event *strreset;
3398 	int len;
3399 
3400 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3401 		/* event not enabled */
3402 		return;
3403 	}
3404 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3405 	if (m_notify == NULL)
3406 		/* no space left */
3407 		return;
3408 	SCTP_BUF_LEN(m_notify) = 0;
3409 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3410 	if (len > M_TRAILINGSPACE(m_notify)) {
3411 		/* never enough room */
3412 		sctp_m_freem(m_notify);
3413 		return;
3414 	}
3415 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3416 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3417 	if (number_entries == 0) {
3418 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3419 	} else {
3420 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3421 	}
3422 	strreset->strreset_length = len;
3423 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3424 	if (number_entries) {
3425 		int i;
3426 
3427 		for (i = 0; i < number_entries; i++) {
3428 			strreset->strreset_list[i] = ntohs(list[i]);
3429 		}
3430 	}
3431 	SCTP_BUF_LEN(m_notify) = len;
3432 	SCTP_BUF_NEXT(m_notify) = NULL;
3433 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3434 		/* no space */
3435 		sctp_m_freem(m_notify);
3436 		return;
3437 	}
3438 	/* append to socket */
3439 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3440 	    0, 0, 0, 0, 0, 0,
3441 	    m_notify);
3442 	if (control == NULL) {
3443 		/* no memory */
3444 		sctp_m_freem(m_notify);
3445 		return;
3446 	}
3447 	control->spec_flags = M_NOTIFICATION;
3448 	control->length = SCTP_BUF_LEN(m_notify);
3449 	/* not that we need this */
3450 	control->tail_mbuf = m_notify;
3451 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3452 	    control,
3453 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3454 }
3455 
3456 
3457 void
3458 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3459     uint32_t error, void *data, int so_locked
3460 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3461     SCTP_UNUSED
3462 #endif
3463 )
3464 {
3465 	if ((stcb == NULL) ||
3466 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3467 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3468 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3469 		/* If the socket is gone we are out of here */
3470 		return;
3471 	}
3472 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3473 		return;
3474 	}
3475 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3476 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3477 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3478 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3479 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3480 			/* Don't report these in front states */
3481 			return;
3482 		}
3483 	}
3484 	switch (notification) {
3485 	case SCTP_NOTIFY_ASSOC_UP:
3486 		if (stcb->asoc.assoc_up_sent == 0) {
3487 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3488 			stcb->asoc.assoc_up_sent = 1;
3489 		}
3490 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3491 			sctp_notify_adaptation_layer(stcb, error);
3492 		}
3493 		if (stcb->asoc.peer_supports_auth == 0) {
3494 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3495 			    NULL, so_locked);
3496 		}
3497 		break;
3498 	case SCTP_NOTIFY_ASSOC_DOWN:
3499 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3500 		break;
3501 	case SCTP_NOTIFY_INTERFACE_DOWN:
3502 		{
3503 			struct sctp_nets *net;
3504 
3505 			net = (struct sctp_nets *)data;
3506 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3507 			    (struct sockaddr *)&net->ro._l_addr, error);
3508 			break;
3509 		}
3510 	case SCTP_NOTIFY_INTERFACE_UP:
3511 		{
3512 			struct sctp_nets *net;
3513 
3514 			net = (struct sctp_nets *)data;
3515 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3516 			    (struct sockaddr *)&net->ro._l_addr, error);
3517 			break;
3518 		}
3519 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3520 		{
3521 			struct sctp_nets *net;
3522 
3523 			net = (struct sctp_nets *)data;
3524 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3525 			    (struct sockaddr *)&net->ro._l_addr, error);
3526 			break;
3527 		}
3528 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3529 		sctp_notify_send_failed2(stcb, error,
3530 		    (struct sctp_stream_queue_pending *)data, so_locked);
3531 		break;
3532 	case SCTP_NOTIFY_DG_FAIL:
3533 		sctp_notify_send_failed(stcb, error,
3534 		    (struct sctp_tmit_chunk *)data, so_locked);
3535 		break;
3536 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3537 		{
3538 			uint32_t val;
3539 
3540 			val = *((uint32_t *) data);
3541 
3542 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3543 			break;
3544 		}
3545 	case SCTP_NOTIFY_STRDATA_ERR:
3546 		break;
3547 	case SCTP_NOTIFY_ASSOC_ABORTED:
3548 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3549 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3550 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3551 		} else {
3552 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3553 		}
3554 		break;
3555 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3556 		break;
3557 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3558 		break;
3559 	case SCTP_NOTIFY_ASSOC_RESTART:
3560 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3561 		if (stcb->asoc.peer_supports_auth == 0) {
3562 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3563 			    NULL, so_locked);
3564 		}
3565 		break;
3566 	case SCTP_NOTIFY_HB_RESP:
3567 		break;
3568 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3569 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3570 		break;
3571 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3572 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3573 		break;
3574 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3575 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3576 		break;
3577 
3578 	case SCTP_NOTIFY_STR_RESET_SEND:
3579 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3580 		break;
3581 	case SCTP_NOTIFY_STR_RESET_RECV:
3582 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3583 		break;
3584 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3585 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3586 		break;
3587 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3588 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3589 		break;
3590 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3591 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3592 		    error);
3593 		break;
3594 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3595 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3596 		    error);
3597 		break;
3598 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3599 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3600 		    error);
3601 		break;
3602 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3603 		break;
3604 	case SCTP_NOTIFY_ASCONF_FAILED:
3605 		break;
3606 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3607 		sctp_notify_shutdown_event(stcb);
3608 		break;
3609 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3610 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3611 		    (uint16_t) (uintptr_t) data,
3612 		    so_locked);
3613 		break;
3614 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3615 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3616 		    (uint16_t) (uintptr_t) data,
3617 		    so_locked);
3618 		break;
3619 	case SCTP_NOTIFY_NO_PEER_AUTH:
3620 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3621 		    (uint16_t) (uintptr_t) data,
3622 		    so_locked);
3623 		break;
3624 	case SCTP_NOTIFY_SENDER_DRY:
3625 		sctp_notify_sender_dry_event(stcb, so_locked);
3626 		break;
3627 	default:
3628 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3629 		    __FUNCTION__, notification, notification);
3630 		break;
3631 	}			/* end switch */
3632 }
3633 
/*
 * Walk every outbound queue of the association (sent queue, send queue
 * and each stream's pending queue) and report every chunk/message still
 * sitting there as failed to the ULP, freeing the data as we go.
 *
 * holds_lock - non-zero if the caller already holds the TCB send lock.
 */
void
sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_stream_queue_pending *sp, *nsp;
	int i;

	if (stcb == NULL) {
		return;
	}
	asoc = &stcb->asoc;
	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
		/* already being freed */
		return;
	}
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
		return;
	}
	/* now through all the gunk freeing chunks */
	if (holds_lock == 0) {
		SCTP_TCB_SEND_LOCK(stcb);
	}
	/* sent queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
		asoc->sent_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			/* the notification may steal chk->data; free what remains */
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk);
		/* sa_ignore FREED_MEMORY */
	}
	/* pending send queue SHOULD be empty */
	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
		asoc->send_queue_cnt--;
		if (chk->data != NULL) {
			sctp_free_bufspace(stcb, asoc, chk, 1);
			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
			    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
		}
		sctp_free_a_chunk(stcb, chk);
		/* sa_ignore FREED_MEMORY */
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/* For each stream */
		outs = &asoc->strmout[i];
		/* clean up any sends there */
		asoc->locked_on_sending = NULL;
		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
			asoc->stream_queue_cnt--;
			TAILQ_REMOVE(&outs->outqueue, sp, next);
			sctp_free_spbufspace(stcb, asoc, sp);
			if (sp->data) {
				/* the notification may steal sp->data as well */
				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
				    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
			}
			if (sp->net) {
				sctp_free_remote_addr(sp->net);
				sp->net = NULL;
			}
			/* Free the chunk */
			sctp_free_a_strmoq(stcb, sp);
			/* sa_ignore FREED_MEMORY */
		}
	}

	if (holds_lock == 0) {
		SCTP_TCB_SEND_UNLOCK(stcb);
	}
}
3727 
3728 void
3729 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3730 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3731     SCTP_UNUSED
3732 #endif
3733 )
3734 {
3735 
3736 	if (stcb == NULL) {
3737 		return;
3738 	}
3739 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3740 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3741 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3742 		return;
3743 	}
3744 	/* Tell them we lost the asoc */
3745 	sctp_report_all_outbound(stcb, 1, so_locked);
3746 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3747 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3748 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3749 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3750 	}
3751 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3752 }
3753 
/*
 * Abort an association in response to an inbound packet: send an ABORT
 * back (using the peer's vtag when a TCB exists) and, if we have a TCB,
 * notify the ULP and free the association.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock order: drop the TCB lock before taking the socket
		 * lock; the refcount keeps the stcb alive while unlocked.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			/* established association going away */
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3797 
3798 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Dump the per-association inbound/outbound TSN tracking logs for
 * debugging (ring buffers of SCTP_TSN_LOG_SIZE entries).
 * NOTE(review): "NOSIY_PRINTS" looks like a misspelling of
 * "NOISY_PRINTS"; defining the correctly spelled macro will NOT enable
 * this body - confirm before relying on it.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* ring buffer has wrapped: print the older entries first */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* then the newer entries from the start of the ring */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3859 
3860 #endif
3861 
3862 void
3863 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3864     int error, struct mbuf *op_err,
3865     int so_locked
3866 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3867     SCTP_UNUSED
3868 #endif
3869 )
3870 {
3871 	uint32_t vtag;
3872 
3873 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3874 	struct socket *so;
3875 
3876 #endif
3877 
3878 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3879 	so = SCTP_INP_SO(inp);
3880 #endif
3881 	if (stcb == NULL) {
3882 		/* Got to have a TCB */
3883 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3884 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3885 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3886 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
3887 			}
3888 		}
3889 		return;
3890 	} else {
3891 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3892 	}
3893 	vtag = stcb->asoc.peer_vtag;
3894 	/* notify the ulp */
3895 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
3896 		sctp_abort_notification(stcb, error, so_locked);
3897 	/* notify the peer */
3898 #if defined(SCTP_PANIC_ON_ABORT)
3899 	panic("aborting an association");
3900 #endif
3901 	sctp_send_abort_tcb(stcb, op_err, so_locked);
3902 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3903 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3904 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3905 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3906 	}
3907 	/* now free the asoc */
3908 #ifdef SCTP_ASOCLOG_OF_TSNS
3909 	sctp_print_out_track_log(stcb);
3910 #endif
3911 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3912 	if (!so_locked) {
3913 		atomic_add_int(&stcb->asoc.refcnt, 1);
3914 		SCTP_TCB_UNLOCK(stcb);
3915 		SCTP_SOCKET_LOCK(so, 1);
3916 		SCTP_TCB_LOCK(stcb);
3917 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3918 	}
3919 #endif
3920 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
3921 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3922 	if (!so_locked) {
3923 		SCTP_SOCKET_UNLOCK(so, 1);
3924 	}
3925 #endif
3926 }
3927 
/*
 * Handle an "out of the blue" packet (one for which no association
 * exists).  Scan its chunks: several chunk types are silently ignored,
 * a SHUTDOWN-ACK is answered with SHUTDOWN-COMPLETE, and anything else
 * (or a malformed chunk) results in an ABORT being sent back.
 */
void
sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
{
	struct sctp_chunkhdr *ch, chunk_buf;
	unsigned int chk_length;

	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
	/* Generate a TO address for future reference */
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
			/* endpoint has no assocs left: finish freeing it */
			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
			    SCTP_CALLED_DIRECTLY_NOCMPSET);
		}
	}
	/* walk the chunk headers; chunk_buf holds a contiguous copy */
	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
	    sizeof(*ch), (uint8_t *) & chunk_buf);
	while (ch != NULL) {
		chk_length = ntohs(ch->chunk_length);
		if (chk_length < sizeof(*ch)) {
			/* break to abort land */
			break;
		}
		switch (ch->chunk_type) {
		case SCTP_COOKIE_ECHO:
			/* We hit here only if the assoc is being freed */
			return;
		case SCTP_PACKET_DROPPED:
			/* we don't respond to pkt-dropped */
			return;
		case SCTP_ABORT_ASSOCIATION:
			/* we don't respond with an ABORT to an ABORT */
			return;
		case SCTP_SHUTDOWN_COMPLETE:
			/*
			 * we ignore it since we are not waiting for it and
			 * peer is gone
			 */
			return;
		case SCTP_SHUTDOWN_ACK:
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
			return;
		default:
			break;
		}
		/* advance to the next chunk (lengths are padded to 4 bytes) */
		offset += SCTP_SIZE32(chk_length);
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
		    sizeof(*ch), (uint8_t *) & chunk_buf);
	}
	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
}
3979 
3980 /*
3981  * check the inbound datagram to make sure there is not an abort inside it,
3982  * if there is return 1, else return 0.
3983  */
3984 int
3985 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3986 {
3987 	struct sctp_chunkhdr *ch;
3988 	struct sctp_init_chunk *init_chk, chunk_buf;
3989 	int offset;
3990 	unsigned int chk_length;
3991 
3992 	offset = iphlen + sizeof(struct sctphdr);
3993 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3994 	    (uint8_t *) & chunk_buf);
3995 	while (ch != NULL) {
3996 		chk_length = ntohs(ch->chunk_length);
3997 		if (chk_length < sizeof(*ch)) {
3998 			/* packet is probably corrupt */
3999 			break;
4000 		}
4001 		/* we seem to be ok, is it an abort? */
4002 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4003 			/* yep, tell them */
4004 			return (1);
4005 		}
4006 		if (ch->chunk_type == SCTP_INITIATION) {
4007 			/* need to update the Vtag */
4008 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4009 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4010 			if (init_chk != NULL) {
4011 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4012 			}
4013 		}
4014 		/* Nope, move to the next chunk */
4015 		offset += SCTP_SIZE32(chk_length);
4016 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4017 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4018 	}
4019 	return (0);
4020 }
4021 
4022 /*
4023  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4024  * set (i.e. it's 0) so, create this function to compare link local scopes
4025  */
4026 #ifdef INET6
uint32_t
sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
{
	/* Work on local copies so the callers' sockaddrs are untouched. */
	struct sockaddr_in6 lhs, rhs;

	lhs = *addr1;
	rhs = *addr2;

	/*
	 * Recover any missing scope id; if recovery fails we cannot
	 * compare, so treat the addresses as not matching.
	 */
	if ((lhs.sin6_scope_id == 0) && sa6_recoverscope(&lhs)) {
		return (0);
	}
	if ((rhs.sin6_scope_id == 0) && sa6_recoverscope(&rhs)) {
		return (0);
	}
	return ((lhs.sin6_scope_id == rhs.sin6_scope_id) ? 1 : 0);
}
4051 
4052 /*
4053  * returns a sockaddr_in6 with embedded scope recovered and removed
4054  */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* Only IPv6 link-local addresses carry embedded scope junk. */
	if (addr->sin6_family != AF_INET6) {
		return (addr);
	}
	if (!IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
		return (addr);
	}
	if (addr->sin6_scope_id == 0) {
		/* recover the scope into the caller-supplied storage */
		*store = *addr;
		if (!sa6_recoverscope(store)) {
			/* use the recovered scope */
			addr = store;
		}
	} else {
		/* scope id already set; strip the embedded copy in place */
		in6_clearscope(&addr->sin6_addr);
	}
	return (addr);
}
4075 
4076 #endif
4077 
4078 /*
4079  * are the two addresses the same?  currently a "scopeless" check returns: 1
4080  * if same, 0 if not
4081  */
4082 int
4083 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4084 {
4085 
4086 	/* must be valid */
4087 	if (sa1 == NULL || sa2 == NULL)
4088 		return (0);
4089 
4090 	/* must be the same family */
4091 	if (sa1->sa_family != sa2->sa_family)
4092 		return (0);
4093 
4094 	switch (sa1->sa_family) {
4095 #ifdef INET6
4096 	case AF_INET6:
4097 		{
4098 			/* IPv6 addresses */
4099 			struct sockaddr_in6 *sin6_1, *sin6_2;
4100 
4101 			sin6_1 = (struct sockaddr_in6 *)sa1;
4102 			sin6_2 = (struct sockaddr_in6 *)sa2;
4103 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4104 			    sin6_2));
4105 		}
4106 #endif
4107 	case AF_INET:
4108 		{
4109 			/* IPv4 addresses */
4110 			struct sockaddr_in *sin_1, *sin_2;
4111 
4112 			sin_1 = (struct sockaddr_in *)sa1;
4113 			sin_2 = (struct sockaddr_in *)sa2;
4114 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4115 		}
4116 	default:
4117 		/* we don't do these... */
4118 		return (0);
4119 	}
4120 }
4121 
void
sctp_print_address(struct sockaddr *sa)
{
#ifdef INET6
	char ip6buf[INET6_ADDRSTRLEN];

	ip6buf[0] = 0;
#endif

	if (sa->sa_family == AF_INET) {
		struct sockaddr_in *sin;
		unsigned char *b;

		sin = (struct sockaddr_in *)sa;
		/* print the address a byte at a time in dotted-quad form */
		b = (unsigned char *)&sin->sin_addr;
		SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
		    b[0], b[1], b[2], b[3], ntohs(sin->sin_port));
		return;
	}
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sa;
		SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
		    ip6_sprintf(ip6buf, &sin6->sin6_addr),
		    ntohs(sin6->sin6_port),
		    sin6->sin6_scope_id);
		return;
	}
#endif
	/* unknown family */
	SCTP_PRINTF("?\n");
}
4161 
4162 void
4163 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4164 {
4165 	switch (iph->ip_v) {
4166 	case IPVERSION:
4167 		{
4168 			struct sockaddr_in lsa, fsa;
4169 
4170 			bzero(&lsa, sizeof(lsa));
4171 			lsa.sin_len = sizeof(lsa);
4172 			lsa.sin_family = AF_INET;
4173 			lsa.sin_addr = iph->ip_src;
4174 			lsa.sin_port = sh->src_port;
4175 			bzero(&fsa, sizeof(fsa));
4176 			fsa.sin_len = sizeof(fsa);
4177 			fsa.sin_family = AF_INET;
4178 			fsa.sin_addr = iph->ip_dst;
4179 			fsa.sin_port = sh->dest_port;
4180 			SCTP_PRINTF("src: ");
4181 			sctp_print_address((struct sockaddr *)&lsa);
4182 			SCTP_PRINTF("dest: ");
4183 			sctp_print_address((struct sockaddr *)&fsa);
4184 			break;
4185 		}
4186 #ifdef INET6
4187 	case IPV6_VERSION >> 4:
4188 		{
4189 			struct ip6_hdr *ip6;
4190 			struct sockaddr_in6 lsa6, fsa6;
4191 
4192 			ip6 = (struct ip6_hdr *)iph;
4193 			bzero(&lsa6, sizeof(lsa6));
4194 			lsa6.sin6_len = sizeof(lsa6);
4195 			lsa6.sin6_family = AF_INET6;
4196 			lsa6.sin6_addr = ip6->ip6_src;
4197 			lsa6.sin6_port = sh->src_port;
4198 			bzero(&fsa6, sizeof(fsa6));
4199 			fsa6.sin6_len = sizeof(fsa6);
4200 			fsa6.sin6_family = AF_INET6;
4201 			fsa6.sin6_addr = ip6->ip6_dst;
4202 			fsa6.sin6_port = sh->dest_port;
4203 			SCTP_PRINTF("src: ");
4204 			sctp_print_address((struct sockaddr *)&lsa6);
4205 			SCTP_PRINTF("dest: ");
4206 			sctp_print_address((struct sockaddr *)&fsa6);
4207 			break;
4208 		}
4209 #endif
4210 	default:
4211 		/* TSNH */
4212 		break;
4213 	}
4214 }
4215 
/*
 * Move every read-queue entry that belongs to stcb from the old endpoint to
 * the new one, keeping both sockets' receive-buffer accounting consistent
 * (used when an association migrates between endpoints, e.g. peeloff/accept).
 * Locking: acquires the sb-lock on the old socket, then each inp read lock
 * in turn; never holds both read locks at once.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;	/* staging queue between the two inps */
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* keep concurrent readers off the old socket while we move data */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			/* release each mbuf's bytes from the old sockbuf */
			m = control->data;
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		/* charge each mbuf's bytes to the new sockbuf */
		m = control->data;
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4291 
/*
 * Append a fully-built read-queue entry to the endpoint's read queue and
 * charge its mbuf chain to the socket buffer, then wake the reader.
 * Zero-length mbufs are trimmed from the chain first.  If the socket can
 * no longer be read from, the entry and its data are freed instead.
 * inp_read_lock_held says whether the caller already owns the inp read
 * lock; so_locked matters only on platforms with per-socket locks.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader is gone; drop the entry and its data */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* notifications are not counted as received user messages */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* charge this mbuf to the socket buffer */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		/* the message is complete; mark it so for the reader */
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Take a refcount, drop the TCB lock and
				 * re-acquire after grabbing the socket lock
				 * (socket lock before TCB lock order).
				 */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* socket vanished while unlocked */
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4413 
4414 
/*
 * Append mbuf chain m to an existing read-queue entry (partial delivery API
 * or reassembly-queue append).  Zero-length mbufs are trimmed; when sb is
 * non-NULL the bytes are charged to that socket buffer.  end marks the
 * message complete; ctls_cumack records the highest TSN covered so far.
 * Returns 0 on success, -1 on failure (no control, already complete, or
 * nothing to add).
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* reader is gone; silently succeed without queueing */
		SCTP_INP_READ_UNLOCK(inp);
		return 0;
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			/* charge this mbuf to the socket buffer */
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			/*
			 * Socket lock must come before the TCB lock:
			 * take a ref, drop and re-acquire in order.
			 */
			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* socket vanished while unlocked */
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4560 
4561 
4562 
4563 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4564  *************ALTERNATE ROUTING CODE
4565  */
4566 
4567 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4568  *************ALTERNATE ROUTING CODE
4569  */
4570 
4571 struct mbuf *
4572 sctp_generate_invmanparam(int err)
4573 {
4574 	/* Return a MBUF with a invalid mandatory parameter */
4575 	struct mbuf *m;
4576 
4577 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4578 	if (m) {
4579 		struct sctp_paramhdr *ph;
4580 
4581 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4582 		ph = mtod(m, struct sctp_paramhdr *);
4583 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4584 		ph->param_type = htons(err);
4585 	}
4586 	return (m);
4587 }
4588 
4589 #ifdef SCTP_MBCNT_LOGGING
4590 void
4591 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4592     struct sctp_tmit_chunk *tp1, int chk_cnt)
4593 {
4594 	if (tp1->data == NULL) {
4595 		return;
4596 	}
4597 	asoc->chunks_on_out_queue -= chk_cnt;
4598 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4599 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4600 		    asoc->total_output_queue_size,
4601 		    tp1->book_size,
4602 		    0,
4603 		    tp1->mbcnt);
4604 	}
4605 	if (asoc->total_output_queue_size >= tp1->book_size) {
4606 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4607 	} else {
4608 		asoc->total_output_queue_size = 0;
4609 	}
4610 
4611 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4612 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4613 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4614 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4615 		} else {
4616 			stcb->sctp_socket->so_snd.sb_cc = 0;
4617 
4618 		}
4619 	}
4620 }
4621 
4622 #endif
4623 
4624 int
4625 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4626     int reason, int so_locked
4627 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4628     SCTP_UNUSED
4629 #endif
4630 )
4631 {
4632 	struct sctp_stream_out *strq;
4633 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4634 	struct sctp_stream_queue_pending *sp;
4635 	uint16_t stream = 0, seq = 0;
4636 	uint8_t foundeom = 0;
4637 	int ret_sz = 0;
4638 	int notdone;
4639 	int do_wakeup_routine = 0;
4640 
4641 	stream = tp1->rec.data.stream_number;
4642 	seq = tp1->rec.data.stream_seq;
4643 	do {
4644 		ret_sz += tp1->book_size;
4645 		if (tp1->data != NULL) {
4646 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4647 				sctp_flight_size_decrease(tp1);
4648 				sctp_total_flight_decrease(stcb, tp1);
4649 			}
4650 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4651 			stcb->asoc.peers_rwnd += tp1->send_size;
4652 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4653 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4654 			if (tp1->data) {
4655 				sctp_m_freem(tp1->data);
4656 				tp1->data = NULL;
4657 			}
4658 			do_wakeup_routine = 1;
4659 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4660 				stcb->asoc.sent_queue_cnt_removeable--;
4661 			}
4662 		}
4663 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4664 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4665 		    SCTP_DATA_NOT_FRAG) {
4666 			/* not frag'ed we ae done   */
4667 			notdone = 0;
4668 			foundeom = 1;
4669 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4670 			/* end of frag, we are done */
4671 			notdone = 0;
4672 			foundeom = 1;
4673 		} else {
4674 			/*
4675 			 * Its a begin or middle piece, we must mark all of
4676 			 * it
4677 			 */
4678 			notdone = 1;
4679 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4680 		}
4681 	} while (tp1 && notdone);
4682 	if (foundeom == 0) {
4683 		/*
4684 		 * The multi-part message was scattered across the send and
4685 		 * sent queue.
4686 		 */
4687 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4688 			if ((tp1->rec.data.stream_number != stream) ||
4689 			    (tp1->rec.data.stream_seq != seq)) {
4690 				break;
4691 			}
4692 			/*
4693 			 * save to chk in case we have some on stream out
4694 			 * queue. If so and we have an un-transmitted one we
4695 			 * don't have to fudge the TSN.
4696 			 */
4697 			chk = tp1;
4698 			ret_sz += tp1->book_size;
4699 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4700 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4701 			if (tp1->data) {
4702 				sctp_m_freem(tp1->data);
4703 				tp1->data = NULL;
4704 			}
4705 			/* No flight involved here book the size to 0 */
4706 			tp1->book_size = 0;
4707 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4708 				foundeom = 1;
4709 			}
4710 			do_wakeup_routine = 1;
4711 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4712 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4713 			/*
4714 			 * on to the sent queue so we can wait for it to be
4715 			 * passed by.
4716 			 */
4717 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4718 			    sctp_next);
4719 			stcb->asoc.send_queue_cnt--;
4720 			stcb->asoc.sent_queue_cnt++;
4721 		}
4722 	}
4723 	if (foundeom == 0) {
4724 		/*
4725 		 * Still no eom found. That means there is stuff left on the
4726 		 * stream out queue.. yuck.
4727 		 */
4728 		strq = &stcb->asoc.strmout[stream];
4729 		SCTP_TCB_SEND_LOCK(stcb);
4730 		TAILQ_FOREACH(sp, &strq->outqueue, next) {
4731 			/* FIXME: Shouldn't this be a serial number check? */
4732 			if (sp->strseq > seq) {
4733 				break;
4734 			}
4735 			/* Check if its our SEQ */
4736 			if (sp->strseq == seq) {
4737 				sp->discard_rest = 1;
4738 				/*
4739 				 * We may need to put a chunk on the queue
4740 				 * that holds the TSN that would have been
4741 				 * sent with the LAST bit.
4742 				 */
4743 				if (chk == NULL) {
4744 					/* Yep, we have to */
4745 					sctp_alloc_a_chunk(stcb, chk);
4746 					if (chk == NULL) {
4747 						/*
4748 						 * we are hosed. All we can
4749 						 * do is nothing.. which
4750 						 * will cause an abort if
4751 						 * the peer is paying
4752 						 * attention.
4753 						 */
4754 						goto oh_well;
4755 					}
4756 					memset(chk, 0, sizeof(*chk));
4757 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4758 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4759 					chk->asoc = &stcb->asoc;
4760 					chk->rec.data.stream_seq = sp->strseq;
4761 					chk->rec.data.stream_number = sp->stream;
4762 					chk->rec.data.payloadtype = sp->ppid;
4763 					chk->rec.data.context = sp->context;
4764 					chk->flags = sp->act_flags;
4765 					if (sp->net)
4766 						chk->whoTo = sp->net;
4767 					else
4768 						chk->whoTo = stcb->asoc.primary_destination;
4769 					atomic_add_int(&chk->whoTo->ref_count, 1);
4770 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4771 					stcb->asoc.pr_sctp_cnt++;
4772 					chk->pr_sctp_on = 1;
4773 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4774 					stcb->asoc.sent_queue_cnt++;
4775 					stcb->asoc.pr_sctp_cnt++;
4776 				} else {
4777 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4778 				}
4779 		oh_well:
4780 				if (sp->data) {
4781 					/*
4782 					 * Pull any data to free up the SB
4783 					 * and allow sender to "add more"
4784 					 * whilc we will throw away :-)
4785 					 */
4786 					sctp_free_spbufspace(stcb, &stcb->asoc,
4787 					    sp);
4788 					ret_sz += sp->length;
4789 					do_wakeup_routine = 1;
4790 					sp->some_taken = 1;
4791 					sctp_m_freem(sp->data);
4792 					sp->length = 0;
4793 					sp->data = NULL;
4794 					sp->tail_mbuf = NULL;
4795 				}
4796 				break;
4797 			}
4798 		}		/* End tailq_foreach */
4799 		SCTP_TCB_SEND_UNLOCK(stcb);
4800 	}
4801 	if (do_wakeup_routine) {
4802 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4803 		struct socket *so;
4804 
4805 		so = SCTP_INP_SO(stcb->sctp_ep);
4806 		if (!so_locked) {
4807 			atomic_add_int(&stcb->asoc.refcnt, 1);
4808 			SCTP_TCB_UNLOCK(stcb);
4809 			SCTP_SOCKET_LOCK(so, 1);
4810 			SCTP_TCB_LOCK(stcb);
4811 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4812 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4813 				/* assoc was freed while we were unlocked */
4814 				SCTP_SOCKET_UNLOCK(so, 1);
4815 				return (ret_sz);
4816 			}
4817 		}
4818 #endif
4819 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4820 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4821 		if (!so_locked) {
4822 			SCTP_SOCKET_UNLOCK(so, 1);
4823 		}
4824 #endif
4825 	}
4826 	return (ret_sz);
4827 }
4828 
4829 /*
4830  * checks to see if the given address, sa, is one that is currently known by
4831  * the kernel note: can't distinguish the same address on multiple interfaces
4832  * and doesn't handle multiple addresses with different zone/scope id's note:
4833  * ifa_ifwithaddr() compares the entire sockaddr struct
4834  */
4835 struct sctp_ifa *
4836 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4837     int holds_lock)
4838 {
4839 	struct sctp_laddr *laddr;
4840 
4841 	if (holds_lock == 0) {
4842 		SCTP_INP_RLOCK(inp);
4843 	}
4844 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4845 		if (laddr->ifa == NULL)
4846 			continue;
4847 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4848 			continue;
4849 		if (addr->sa_family == AF_INET) {
4850 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4851 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4852 				/* found him. */
4853 				if (holds_lock == 0) {
4854 					SCTP_INP_RUNLOCK(inp);
4855 				}
4856 				return (laddr->ifa);
4857 				break;
4858 			}
4859 		}
4860 #ifdef INET6
4861 		if (addr->sa_family == AF_INET6) {
4862 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4863 			    &laddr->ifa->address.sin6)) {
4864 				/* found him. */
4865 				if (holds_lock == 0) {
4866 					SCTP_INP_RUNLOCK(inp);
4867 				}
4868 				return (laddr->ifa);
4869 				break;
4870 			}
4871 		}
4872 #endif
4873 	}
4874 	if (holds_lock == 0) {
4875 		SCTP_INP_RUNLOCK(inp);
4876 	}
4877 	return (NULL);
4878 }
4879 
4880 uint32_t
4881 sctp_get_ifa_hash_val(struct sockaddr *addr)
4882 {
4883 	if (addr->sa_family == AF_INET) {
4884 		struct sockaddr_in *sin;
4885 
4886 		sin = (struct sockaddr_in *)addr;
4887 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4888 	} else if (addr->sa_family == AF_INET6) {
4889 		struct sockaddr_in6 *sin6;
4890 		uint32_t hash_of_addr;
4891 
4892 		sin6 = (struct sockaddr_in6 *)addr;
4893 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4894 		    sin6->sin6_addr.s6_addr32[1] +
4895 		    sin6->sin6_addr.s6_addr32[2] +
4896 		    sin6->sin6_addr.s6_addr32[3]);
4897 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4898 		return (hash_of_addr);
4899 	}
4900 	return (0);
4901 }
4902 
4903 struct sctp_ifa *
4904 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4905 {
4906 	struct sctp_ifa *sctp_ifap;
4907 	struct sctp_vrf *vrf;
4908 	struct sctp_ifalist *hash_head;
4909 	uint32_t hash_of_addr;
4910 
4911 	if (holds_lock == 0)
4912 		SCTP_IPI_ADDR_RLOCK();
4913 
4914 	vrf = sctp_find_vrf(vrf_id);
4915 	if (vrf == NULL) {
4916 stage_right:
4917 		if (holds_lock == 0)
4918 			SCTP_IPI_ADDR_RUNLOCK();
4919 		return (NULL);
4920 	}
4921 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4922 
4923 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4924 	if (hash_head == NULL) {
4925 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4926 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4927 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4928 		sctp_print_address(addr);
4929 		SCTP_PRINTF("No such bucket for address\n");
4930 		if (holds_lock == 0)
4931 			SCTP_IPI_ADDR_RUNLOCK();
4932 
4933 		return (NULL);
4934 	}
4935 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4936 		if (sctp_ifap == NULL) {
4937 #ifdef INVARIANTS
4938 			panic("Huh LIST_FOREACH corrupt");
4939 			goto stage_right;
4940 #else
4941 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4942 			goto stage_right;
4943 #endif
4944 		}
4945 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4946 			continue;
4947 		if (addr->sa_family == AF_INET) {
4948 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4949 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4950 				/* found him. */
4951 				if (holds_lock == 0)
4952 					SCTP_IPI_ADDR_RUNLOCK();
4953 				return (sctp_ifap);
4954 				break;
4955 			}
4956 		}
4957 #ifdef INET6
4958 		if (addr->sa_family == AF_INET6) {
4959 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4960 			    &sctp_ifap->address.sin6)) {
4961 				/* found him. */
4962 				if (holds_lock == 0)
4963 					SCTP_IPI_ADDR_RUNLOCK();
4964 				return (sctp_ifap);
4965 				break;
4966 			}
4967 		}
4968 #endif
4969 	}
4970 	if (holds_lock == 0)
4971 		SCTP_IPI_ADDR_RUNLOCK();
4972 	return (NULL);
4973 }
4974 
/*
 * Called after the user has pulled *freed_so_far bytes off the receive
 * queue, to decide whether the peer should be told that our receive
 * window (rwnd) has opened up.  If the window growth since the last
 * report is at least rwnd_req, a window-update SACK is sent and any
 * pending RECV timer is stopped; otherwise the freed amount is just
 * accumulated in stcb->freed_by_sorcv_sincelast for a later call.
 *
 * stcb         - the association; NULL is tolerated (no-op).
 * freed_so_far - in/out: bytes freed since the last update; zeroed here
 *                once they have been folded into the running count.
 * hold_rlock   - non-zero if the caller holds the inp READ lock; it is
 *                dropped before taking the TCB lock and reacquired on
 *                the way out.
 * rwnd_req     - threshold (bytes) of window growth that justifies
 *                sending a window-update SACK.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed while we work. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Also pin the endpoint; released at the out: label. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed bytes into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		/* dif = how much the window has grown since last report. */
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/*
		 * Window grew enough to be worth a SACK.  Drop the inp
		 * READ lock (if held) before taking the TCB lock, to
		 * respect the lock ordering.
		 */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: freeing may have started. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		/* Send the window-update SACK and push it out now. */
		sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Reacquire the inp READ lock if we dropped it for the caller. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Drop the association pin taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5056 
5057 int
5058 sctp_sorecvmsg(struct socket *so,
5059     struct uio *uio,
5060     struct mbuf **mp,
5061     struct sockaddr *from,
5062     int fromlen,
5063     int *msg_flags,
5064     struct sctp_sndrcvinfo *sinfo,
5065     int filling_sinfo)
5066 {
5067 	/*
5068 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5069 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5070 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5071 	 * On the way out we may send out any combination of:
5072 	 * MSG_NOTIFICATION MSG_EOR
5073 	 *
5074 	 */
5075 	struct sctp_inpcb *inp = NULL;
5076 	int my_len = 0;
5077 	int cp_len = 0, error = 0;
5078 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5079 	struct mbuf *m = NULL;
5080 	struct sctp_tcb *stcb = NULL;
5081 	int wakeup_read_socket = 0;
5082 	int freecnt_applied = 0;
5083 	int out_flags = 0, in_flags = 0;
5084 	int block_allowed = 1;
5085 	uint32_t freed_so_far = 0;
5086 	uint32_t copied_so_far = 0;
5087 	int in_eeor_mode = 0;
5088 	int no_rcv_needed = 0;
5089 	uint32_t rwnd_req = 0;
5090 	int hold_sblock = 0;
5091 	int hold_rlock = 0;
5092 	int slen = 0;
5093 	uint32_t held_length = 0;
5094 	int sockbuf_lock = 0;
5095 
5096 	if (uio == NULL) {
5097 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5098 		return (EINVAL);
5099 	}
5100 	if (msg_flags) {
5101 		in_flags = *msg_flags;
5102 		if (in_flags & MSG_PEEK)
5103 			SCTP_STAT_INCR(sctps_read_peeks);
5104 	} else {
5105 		in_flags = 0;
5106 	}
5107 	slen = uio->uio_resid;
5108 
5109 	/* Pull in and set up our int flags */
5110 	if (in_flags & MSG_OOB) {
5111 		/* Out of band's NOT supported */
5112 		return (EOPNOTSUPP);
5113 	}
5114 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5115 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5116 		return (EINVAL);
5117 	}
5118 	if ((in_flags & (MSG_DONTWAIT
5119 	    | MSG_NBIO
5120 	    )) ||
5121 	    SCTP_SO_IS_NBIO(so)) {
5122 		block_allowed = 0;
5123 	}
5124 	/* setup the endpoint */
5125 	inp = (struct sctp_inpcb *)so->so_pcb;
5126 	if (inp == NULL) {
5127 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5128 		return (EFAULT);
5129 	}
5130 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5131 	/* Must be at least a MTU's worth */
5132 	if (rwnd_req < SCTP_MIN_RWND)
5133 		rwnd_req = SCTP_MIN_RWND;
5134 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5135 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5136 		sctp_misc_ints(SCTP_SORECV_ENTER,
5137 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5138 	}
5139 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5140 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5141 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5142 	}
5143 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5144 	sockbuf_lock = 1;
5145 	if (error) {
5146 		goto release_unlocked;
5147 	}
5148 restart:
5149 
5150 
5151 restart_nosblocks:
5152 	if (hold_sblock == 0) {
5153 		SOCKBUF_LOCK(&so->so_rcv);
5154 		hold_sblock = 1;
5155 	}
5156 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5157 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5158 		goto out;
5159 	}
5160 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5161 		if (so->so_error) {
5162 			error = so->so_error;
5163 			if ((in_flags & MSG_PEEK) == 0)
5164 				so->so_error = 0;
5165 			goto out;
5166 		} else {
5167 			if (so->so_rcv.sb_cc == 0) {
5168 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5169 				/* indicate EOF */
5170 				error = 0;
5171 				goto out;
5172 			}
5173 		}
5174 	}
5175 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5176 		/* we need to wait for data */
5177 		if ((so->so_rcv.sb_cc == 0) &&
5178 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5179 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5180 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5181 				/*
5182 				 * For active open side clear flags for
5183 				 * re-use passive open is blocked by
5184 				 * connect.
5185 				 */
5186 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5187 					/*
5188 					 * You were aborted, passive side
5189 					 * always hits here
5190 					 */
5191 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5192 					error = ECONNRESET;
5193 					/*
5194 					 * You get this once if you are
5195 					 * active open side
5196 					 */
5197 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5198 						/*
5199 						 * Remove flag if on the
5200 						 * active open side
5201 						 */
5202 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5203 					}
5204 				}
5205 				so->so_state &= ~(SS_ISCONNECTING |
5206 				    SS_ISDISCONNECTING |
5207 				    SS_ISCONFIRMING |
5208 				    SS_ISCONNECTED);
5209 				if (error == 0) {
5210 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5211 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5212 						error = ENOTCONN;
5213 					} else {
5214 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5215 					}
5216 				}
5217 				goto out;
5218 			}
5219 		}
5220 		error = sbwait(&so->so_rcv);
5221 		if (error) {
5222 			goto out;
5223 		}
5224 		held_length = 0;
5225 		goto restart_nosblocks;
5226 	} else if (so->so_rcv.sb_cc == 0) {
5227 		if (so->so_error) {
5228 			error = so->so_error;
5229 			if ((in_flags & MSG_PEEK) == 0)
5230 				so->so_error = 0;
5231 		} else {
5232 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5233 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5234 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5235 					/*
5236 					 * For active open side clear flags
5237 					 * for re-use passive open is
5238 					 * blocked by connect.
5239 					 */
5240 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5241 						/*
5242 						 * You were aborted, passive
5243 						 * side always hits here
5244 						 */
5245 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5246 						error = ECONNRESET;
5247 						/*
5248 						 * You get this once if you
5249 						 * are active open side
5250 						 */
5251 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5252 							/*
5253 							 * Remove flag if on
5254 							 * the active open
5255 							 * side
5256 							 */
5257 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5258 						}
5259 					}
5260 					so->so_state &= ~(SS_ISCONNECTING |
5261 					    SS_ISDISCONNECTING |
5262 					    SS_ISCONFIRMING |
5263 					    SS_ISCONNECTED);
5264 					if (error == 0) {
5265 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5266 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5267 							error = ENOTCONN;
5268 						} else {
5269 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5270 						}
5271 					}
5272 					goto out;
5273 				}
5274 			}
5275 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5276 			error = EWOULDBLOCK;
5277 		}
5278 		goto out;
5279 	}
5280 	if (hold_sblock == 1) {
5281 		SOCKBUF_UNLOCK(&so->so_rcv);
5282 		hold_sblock = 0;
5283 	}
5284 	/* we possibly have data we can read */
5285 	/* sa_ignore FREED_MEMORY */
5286 	control = TAILQ_FIRST(&inp->read_queue);
5287 	if (control == NULL) {
5288 		/*
5289 		 * This could be happening since the appender did the
5290 		 * increment but as not yet did the tailq insert onto the
5291 		 * read_queue
5292 		 */
5293 		if (hold_rlock == 0) {
5294 			SCTP_INP_READ_LOCK(inp);
5295 			hold_rlock = 1;
5296 		}
5297 		control = TAILQ_FIRST(&inp->read_queue);
5298 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5299 #ifdef INVARIANTS
5300 			panic("Huh, its non zero and nothing on control?");
5301 #endif
5302 			so->so_rcv.sb_cc = 0;
5303 		}
5304 		SCTP_INP_READ_UNLOCK(inp);
5305 		hold_rlock = 0;
5306 		goto restart;
5307 	}
5308 	if ((control->length == 0) &&
5309 	    (control->do_not_ref_stcb)) {
5310 		/*
5311 		 * Clean up code for freeing assoc that left behind a
5312 		 * pdapi.. maybe a peer in EEOR that just closed after
5313 		 * sending and never indicated a EOR.
5314 		 */
5315 		if (hold_rlock == 0) {
5316 			hold_rlock = 1;
5317 			SCTP_INP_READ_LOCK(inp);
5318 		}
5319 		control->held_length = 0;
5320 		if (control->data) {
5321 			/* Hmm there is data here .. fix */
5322 			struct mbuf *m_tmp;
5323 			int cnt = 0;
5324 
5325 			m_tmp = control->data;
5326 			while (m_tmp) {
5327 				cnt += SCTP_BUF_LEN(m_tmp);
5328 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5329 					control->tail_mbuf = m_tmp;
5330 					control->end_added = 1;
5331 				}
5332 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5333 			}
5334 			control->length = cnt;
5335 		} else {
5336 			/* remove it */
5337 			TAILQ_REMOVE(&inp->read_queue, control, next);
5338 			/* Add back any hiddend data */
5339 			sctp_free_remote_addr(control->whoFrom);
5340 			sctp_free_a_readq(stcb, control);
5341 		}
5342 		if (hold_rlock) {
5343 			hold_rlock = 0;
5344 			SCTP_INP_READ_UNLOCK(inp);
5345 		}
5346 		goto restart;
5347 	}
5348 	if ((control->length == 0) &&
5349 	    (control->end_added == 1)) {
5350 		/*
5351 		 * Do we also need to check for (control->pdapi_aborted ==
5352 		 * 1)?
5353 		 */
5354 		if (hold_rlock == 0) {
5355 			hold_rlock = 1;
5356 			SCTP_INP_READ_LOCK(inp);
5357 		}
5358 		TAILQ_REMOVE(&inp->read_queue, control, next);
5359 		if (control->data) {
5360 #ifdef INVARIANTS
5361 			panic("control->data not null but control->length == 0");
5362 #else
5363 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5364 			sctp_m_freem(control->data);
5365 			control->data = NULL;
5366 #endif
5367 		}
5368 		if (control->aux_data) {
5369 			sctp_m_free(control->aux_data);
5370 			control->aux_data = NULL;
5371 		}
5372 		sctp_free_remote_addr(control->whoFrom);
5373 		sctp_free_a_readq(stcb, control);
5374 		if (hold_rlock) {
5375 			hold_rlock = 0;
5376 			SCTP_INP_READ_UNLOCK(inp);
5377 		}
5378 		goto restart;
5379 	}
5380 	if (control->length == 0) {
5381 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5382 		    (filling_sinfo)) {
5383 			/* find a more suitable one then this */
5384 			ctl = TAILQ_NEXT(control, next);
5385 			while (ctl) {
5386 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5387 				    (ctl->some_taken ||
5388 				    (ctl->spec_flags & M_NOTIFICATION) ||
5389 				    ((ctl->do_not_ref_stcb == 0) &&
5390 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5391 				    ) {
5392 					/*-
5393 					 * If we have a different TCB next, and there is data
5394 					 * present. If we have already taken some (pdapi), OR we can
5395 					 * ref the tcb and no delivery as started on this stream, we
5396 					 * take it. Note we allow a notification on a different
5397 					 * assoc to be delivered..
5398 					 */
5399 					control = ctl;
5400 					goto found_one;
5401 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5402 					    (ctl->length) &&
5403 					    ((ctl->some_taken) ||
5404 					    ((ctl->do_not_ref_stcb == 0) &&
5405 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5406 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5407 					/*-
5408 					 * If we have the same tcb, and there is data present, and we
5409 					 * have the strm interleave feature present. Then if we have
5410 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5411 					 * not started a delivery for this stream, we can take it.
5412 					 * Note we do NOT allow a notificaiton on the same assoc to
5413 					 * be delivered.
5414 					 */
5415 					control = ctl;
5416 					goto found_one;
5417 				}
5418 				ctl = TAILQ_NEXT(ctl, next);
5419 			}
5420 		}
5421 		/*
5422 		 * if we reach here, not suitable replacement is available
5423 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5424 		 * into the our held count, and its time to sleep again.
5425 		 */
5426 		held_length = so->so_rcv.sb_cc;
5427 		control->held_length = so->so_rcv.sb_cc;
5428 		goto restart;
5429 	}
5430 	/* Clear the held length since there is something to read */
5431 	control->held_length = 0;
5432 	if (hold_rlock) {
5433 		SCTP_INP_READ_UNLOCK(inp);
5434 		hold_rlock = 0;
5435 	}
5436 found_one:
5437 	/*
5438 	 * If we reach here, control has a some data for us to read off.
5439 	 * Note that stcb COULD be NULL.
5440 	 */
5441 	control->some_taken++;
5442 	if (hold_sblock) {
5443 		SOCKBUF_UNLOCK(&so->so_rcv);
5444 		hold_sblock = 0;
5445 	}
5446 	stcb = control->stcb;
5447 	if (stcb) {
5448 		if ((control->do_not_ref_stcb == 0) &&
5449 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5450 			if (freecnt_applied == 0)
5451 				stcb = NULL;
5452 		} else if (control->do_not_ref_stcb == 0) {
5453 			/* you can't free it on me please */
5454 			/*
5455 			 * The lock on the socket buffer protects us so the
5456 			 * free code will stop. But since we used the
5457 			 * socketbuf lock and the sender uses the tcb_lock
5458 			 * to increment, we need to use the atomic add to
5459 			 * the refcnt
5460 			 */
5461 			if (freecnt_applied) {
5462 #ifdef INVARIANTS
5463 				panic("refcnt already incremented");
5464 #else
5465 				printf("refcnt already incremented?\n");
5466 #endif
5467 			} else {
5468 				atomic_add_int(&stcb->asoc.refcnt, 1);
5469 				freecnt_applied = 1;
5470 			}
5471 			/*
5472 			 * Setup to remember how much we have not yet told
5473 			 * the peer our rwnd has opened up. Note we grab the
5474 			 * value from the tcb from last time. Note too that
5475 			 * sack sending clears this when a sack is sent,
5476 			 * which is fine. Once we hit the rwnd_req, we then
5477 			 * will go to the sctp_user_rcvd() that will not
5478 			 * lock until it KNOWs it MUST send a WUP-SACK.
5479 			 */
5480 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5481 			stcb->freed_by_sorcv_sincelast = 0;
5482 		}
5483 	}
5484 	if (stcb &&
5485 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5486 	    control->do_not_ref_stcb == 0) {
5487 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5488 	}
5489 	/* First lets get off the sinfo and sockaddr info */
5490 	if ((sinfo) && filling_sinfo) {
5491 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5492 		nxt = TAILQ_NEXT(control, next);
5493 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5494 			struct sctp_extrcvinfo *s_extra;
5495 
5496 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5497 			if ((nxt) &&
5498 			    (nxt->length)) {
5499 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5500 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5501 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5502 				}
5503 				if (nxt->spec_flags & M_NOTIFICATION) {
5504 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5505 				}
5506 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5507 				s_extra->sreinfo_next_length = nxt->length;
5508 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5509 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5510 				if (nxt->tail_mbuf != NULL) {
5511 					if (nxt->end_added) {
5512 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5513 					}
5514 				}
5515 			} else {
5516 				/*
5517 				 * we explicitly 0 this, since the memcpy
5518 				 * got some other things beyond the older
5519 				 * sinfo_ that is on the control's structure
5520 				 * :-D
5521 				 */
5522 				nxt = NULL;
5523 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5524 				s_extra->sreinfo_next_aid = 0;
5525 				s_extra->sreinfo_next_length = 0;
5526 				s_extra->sreinfo_next_ppid = 0;
5527 				s_extra->sreinfo_next_stream = 0;
5528 			}
5529 		}
5530 		/*
5531 		 * update off the real current cum-ack, if we have an stcb.
5532 		 */
5533 		if ((control->do_not_ref_stcb == 0) && stcb)
5534 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5535 		/*
5536 		 * mask off the high bits, we keep the actual chunk bits in
5537 		 * there.
5538 		 */
5539 		sinfo->sinfo_flags &= 0x00ff;
5540 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5541 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5542 		}
5543 	}
5544 #ifdef SCTP_ASOCLOG_OF_TSNS
5545 	{
5546 		int index, newindex;
5547 		struct sctp_pcbtsn_rlog *entry;
5548 
5549 		do {
5550 			index = inp->readlog_index;
5551 			newindex = index + 1;
5552 			if (newindex >= SCTP_READ_LOG_SIZE) {
5553 				newindex = 0;
5554 			}
5555 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5556 		entry = &inp->readlog[index];
5557 		entry->vtag = control->sinfo_assoc_id;
5558 		entry->strm = control->sinfo_stream;
5559 		entry->seq = control->sinfo_ssn;
5560 		entry->sz = control->length;
5561 		entry->flgs = control->sinfo_flags;
5562 	}
5563 #endif
5564 	if (fromlen && from) {
5565 		struct sockaddr *to;
5566 
5567 #ifdef INET
5568 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5569 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5570 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5571 #else
5572 		/* No AF_INET use AF_INET6 */
5573 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5574 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5575 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5576 #endif
5577 
5578 		to = from;
5579 #if defined(INET) && defined(INET6)
5580 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5581 		    (to->sa_family == AF_INET) &&
5582 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5583 			struct sockaddr_in *sin;
5584 			struct sockaddr_in6 sin6;
5585 
5586 			sin = (struct sockaddr_in *)to;
5587 			bzero(&sin6, sizeof(sin6));
5588 			sin6.sin6_family = AF_INET6;
5589 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5590 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5591 			bcopy(&sin->sin_addr,
5592 			    &sin6.sin6_addr.s6_addr32[3],
5593 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5594 			sin6.sin6_port = sin->sin_port;
5595 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5596 		}
5597 #endif
5598 #if defined(INET6)
5599 		{
5600 			struct sockaddr_in6 lsa6, *to6;
5601 
5602 			to6 = (struct sockaddr_in6 *)to;
5603 			sctp_recover_scope_mac(to6, (&lsa6));
5604 		}
5605 #endif
5606 	}
5607 	/* now copy out what data we can */
5608 	if (mp == NULL) {
5609 		/* copy out each mbuf in the chain up to length */
5610 get_more_data:
5611 		m = control->data;
5612 		while (m) {
5613 			/* Move out all we can */
5614 			cp_len = (int)uio->uio_resid;
5615 			my_len = (int)SCTP_BUF_LEN(m);
5616 			if (cp_len > my_len) {
5617 				/* not enough in this buf */
5618 				cp_len = my_len;
5619 			}
5620 			if (hold_rlock) {
5621 				SCTP_INP_READ_UNLOCK(inp);
5622 				hold_rlock = 0;
5623 			}
5624 			if (cp_len > 0)
5625 				error = uiomove(mtod(m, char *), cp_len, uio);
5626 			/* re-read */
5627 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5628 				goto release;
5629 			}
5630 			if ((control->do_not_ref_stcb == 0) && stcb &&
5631 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5632 				no_rcv_needed = 1;
5633 			}
5634 			if (error) {
5635 				/* error we are out of here */
5636 				goto release;
5637 			}
5638 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5639 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5640 			    ((control->end_added == 0) ||
5641 			    (control->end_added &&
5642 			    (TAILQ_NEXT(control, next) == NULL)))
5643 			    ) {
5644 				SCTP_INP_READ_LOCK(inp);
5645 				hold_rlock = 1;
5646 			}
5647 			if (cp_len == SCTP_BUF_LEN(m)) {
5648 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5649 				    (control->end_added)) {
5650 					out_flags |= MSG_EOR;
5651 					if ((control->do_not_ref_stcb == 0) &&
5652 					    (control->stcb != NULL) &&
5653 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5654 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5655 				}
5656 				if (control->spec_flags & M_NOTIFICATION) {
5657 					out_flags |= MSG_NOTIFICATION;
5658 				}
5659 				/* we ate up the mbuf */
5660 				if (in_flags & MSG_PEEK) {
5661 					/* just looking */
5662 					m = SCTP_BUF_NEXT(m);
5663 					copied_so_far += cp_len;
5664 				} else {
5665 					/* dispose of the mbuf */
5666 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5667 						sctp_sblog(&so->so_rcv,
5668 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5669 					}
5670 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5671 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5672 						sctp_sblog(&so->so_rcv,
5673 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5674 					}
5675 					copied_so_far += cp_len;
5676 					freed_so_far += cp_len;
5677 					freed_so_far += MSIZE;
5678 					atomic_subtract_int(&control->length, cp_len);
5679 					control->data = sctp_m_free(m);
5680 					m = control->data;
5681 					/*
5682 					 * been through it all, must hold sb
5683 					 * lock ok to null tail
5684 					 */
5685 					if (control->data == NULL) {
5686 #ifdef INVARIANTS
5687 						if ((control->end_added == 0) ||
5688 						    (TAILQ_NEXT(control, next) == NULL)) {
5689 							/*
5690 							 * If the end is not
5691 							 * added, OR the
5692 							 * next is NOT null
5693 							 * we MUST have the
5694 							 * lock.
5695 							 */
5696 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5697 								panic("Hmm we don't own the lock?");
5698 							}
5699 						}
5700 #endif
5701 						control->tail_mbuf = NULL;
5702 #ifdef INVARIANTS
5703 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5704 							panic("end_added, nothing left and no MSG_EOR");
5705 						}
5706 #endif
5707 					}
5708 				}
5709 			} else {
5710 				/* Do we need to trim the mbuf? */
5711 				if (control->spec_flags & M_NOTIFICATION) {
5712 					out_flags |= MSG_NOTIFICATION;
5713 				}
5714 				if ((in_flags & MSG_PEEK) == 0) {
5715 					SCTP_BUF_RESV_UF(m, cp_len);
5716 					SCTP_BUF_LEN(m) -= cp_len;
5717 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5718 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5719 					}
5720 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5721 					if ((control->do_not_ref_stcb == 0) &&
5722 					    stcb) {
5723 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5724 					}
5725 					copied_so_far += cp_len;
5726 					freed_so_far += cp_len;
5727 					freed_so_far += MSIZE;
5728 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5729 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5730 						    SCTP_LOG_SBRESULT, 0);
5731 					}
5732 					atomic_subtract_int(&control->length, cp_len);
5733 				} else {
5734 					copied_so_far += cp_len;
5735 				}
5736 			}
5737 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5738 				break;
5739 			}
5740 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5741 			    (control->do_not_ref_stcb == 0) &&
5742 			    (freed_so_far >= rwnd_req)) {
5743 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5744 			}
5745 		}		/* end while(m) */
5746 		/*
5747 		 * At this point we have looked at it all and we either have
5748 		 * a MSG_EOR/or read all the user wants... <OR>
5749 		 * control->length == 0.
5750 		 */
5751 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5752 			/* we are done with this control */
5753 			if (control->length == 0) {
5754 				if (control->data) {
5755 #ifdef INVARIANTS
5756 					panic("control->data not null at read eor?");
5757 #else
5758 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5759 					sctp_m_freem(control->data);
5760 					control->data = NULL;
5761 #endif
5762 				}
5763 		done_with_control:
5764 				if (TAILQ_NEXT(control, next) == NULL) {
5765 					/*
5766 					 * If we don't have a next we need a
5767 					 * lock, if there is a next
5768 					 * interrupt is filling ahead of us
5769 					 * and we don't need a lock to
5770 					 * remove this guy (which is the
5771 					 * head of the queue).
5772 					 */
5773 					if (hold_rlock == 0) {
5774 						SCTP_INP_READ_LOCK(inp);
5775 						hold_rlock = 1;
5776 					}
5777 				}
5778 				TAILQ_REMOVE(&inp->read_queue, control, next);
5779 				/* Add back any hiddend data */
5780 				if (control->held_length) {
5781 					held_length = 0;
5782 					control->held_length = 0;
5783 					wakeup_read_socket = 1;
5784 				}
5785 				if (control->aux_data) {
5786 					sctp_m_free(control->aux_data);
5787 					control->aux_data = NULL;
5788 				}
5789 				no_rcv_needed = control->do_not_ref_stcb;
5790 				sctp_free_remote_addr(control->whoFrom);
5791 				control->data = NULL;
5792 				sctp_free_a_readq(stcb, control);
5793 				control = NULL;
5794 				if ((freed_so_far >= rwnd_req) &&
5795 				    (no_rcv_needed == 0))
5796 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5797 
5798 			} else {
5799 				/*
5800 				 * The user did not read all of this
5801 				 * message, turn off the returned MSG_EOR
5802 				 * since we are leaving more behind on the
5803 				 * control to read.
5804 				 */
5805 #ifdef INVARIANTS
5806 				if (control->end_added &&
5807 				    (control->data == NULL) &&
5808 				    (control->tail_mbuf == NULL)) {
5809 					panic("Gak, control->length is corrupt?");
5810 				}
5811 #endif
5812 				no_rcv_needed = control->do_not_ref_stcb;
5813 				out_flags &= ~MSG_EOR;
5814 			}
5815 		}
5816 		if (out_flags & MSG_EOR) {
5817 			goto release;
5818 		}
5819 		if ((uio->uio_resid == 0) ||
5820 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5821 		    ) {
5822 			goto release;
5823 		}
5824 		/*
5825 		 * If I hit here the receiver wants more and this message is
5826 		 * NOT done (pd-api). So two questions. Can we block? if not
5827 		 * we are done. Did the user NOT set MSG_WAITALL?
5828 		 */
5829 		if (block_allowed == 0) {
5830 			goto release;
5831 		}
5832 		/*
5833 		 * We need to wait for more data a few things: - We don't
5834 		 * sbunlock() so we don't get someone else reading. - We
5835 		 * must be sure to account for the case where what is added
5836 		 * is NOT to our control when we wakeup.
5837 		 */
5838 
5839 		/*
5840 		 * Do we need to tell the transport a rwnd update might be
5841 		 * needed before we go to sleep?
5842 		 */
5843 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5844 		    ((freed_so_far >= rwnd_req) &&
5845 		    (control->do_not_ref_stcb == 0) &&
5846 		    (no_rcv_needed == 0))) {
5847 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5848 		}
5849 wait_some_more:
5850 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5851 			goto release;
5852 		}
5853 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5854 			goto release;
5855 
5856 		if (hold_rlock == 1) {
5857 			SCTP_INP_READ_UNLOCK(inp);
5858 			hold_rlock = 0;
5859 		}
5860 		if (hold_sblock == 0) {
5861 			SOCKBUF_LOCK(&so->so_rcv);
5862 			hold_sblock = 1;
5863 		}
5864 		if ((copied_so_far) && (control->length == 0) &&
5865 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5866 			goto release;
5867 		}
5868 		if (so->so_rcv.sb_cc <= control->held_length) {
5869 			error = sbwait(&so->so_rcv);
5870 			if (error) {
5871 				goto release;
5872 			}
5873 			control->held_length = 0;
5874 		}
5875 		if (hold_sblock) {
5876 			SOCKBUF_UNLOCK(&so->so_rcv);
5877 			hold_sblock = 0;
5878 		}
5879 		if (control->length == 0) {
5880 			/* still nothing here */
5881 			if (control->end_added == 1) {
5882 				/* he aborted, or is done i.e.did a shutdown */
5883 				out_flags |= MSG_EOR;
5884 				if (control->pdapi_aborted) {
5885 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5886 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5887 
5888 					out_flags |= MSG_TRUNC;
5889 				} else {
5890 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5891 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5892 				}
5893 				goto done_with_control;
5894 			}
5895 			if (so->so_rcv.sb_cc > held_length) {
5896 				control->held_length = so->so_rcv.sb_cc;
5897 				held_length = 0;
5898 			}
5899 			goto wait_some_more;
5900 		} else if (control->data == NULL) {
5901 			/*
5902 			 * we must re-sync since data is probably being
5903 			 * added
5904 			 */
5905 			SCTP_INP_READ_LOCK(inp);
5906 			if ((control->length > 0) && (control->data == NULL)) {
5907 				/*
5908 				 * big trouble.. we have the lock and its
5909 				 * corrupt?
5910 				 */
5911 #ifdef INVARIANTS
5912 				panic("Impossible data==NULL length !=0");
5913 #endif
5914 				out_flags |= MSG_EOR;
5915 				out_flags |= MSG_TRUNC;
5916 				control->length = 0;
5917 				SCTP_INP_READ_UNLOCK(inp);
5918 				goto done_with_control;
5919 			}
5920 			SCTP_INP_READ_UNLOCK(inp);
5921 			/* We will fall around to get more data */
5922 		}
5923 		goto get_more_data;
5924 	} else {
5925 		/*-
5926 		 * Give caller back the mbuf chain,
5927 		 * store in uio_resid the length
5928 		 */
5929 		wakeup_read_socket = 0;
5930 		if ((control->end_added == 0) ||
5931 		    (TAILQ_NEXT(control, next) == NULL)) {
5932 			/* Need to get rlock */
5933 			if (hold_rlock == 0) {
5934 				SCTP_INP_READ_LOCK(inp);
5935 				hold_rlock = 1;
5936 			}
5937 		}
5938 		if (control->end_added) {
5939 			out_flags |= MSG_EOR;
5940 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5941 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5942 		}
5943 		if (control->spec_flags & M_NOTIFICATION) {
5944 			out_flags |= MSG_NOTIFICATION;
5945 		}
5946 		uio->uio_resid = control->length;
5947 		*mp = control->data;
5948 		m = control->data;
5949 		while (m) {
5950 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5951 				sctp_sblog(&so->so_rcv,
5952 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5953 			}
5954 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5955 			freed_so_far += SCTP_BUF_LEN(m);
5956 			freed_so_far += MSIZE;
5957 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5958 				sctp_sblog(&so->so_rcv,
5959 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5960 			}
5961 			m = SCTP_BUF_NEXT(m);
5962 		}
5963 		control->data = control->tail_mbuf = NULL;
5964 		control->length = 0;
5965 		if (out_flags & MSG_EOR) {
5966 			/* Done with this control */
5967 			goto done_with_control;
5968 		}
5969 	}
5970 release:
5971 	if (hold_rlock == 1) {
5972 		SCTP_INP_READ_UNLOCK(inp);
5973 		hold_rlock = 0;
5974 	}
5975 	if (hold_sblock == 1) {
5976 		SOCKBUF_UNLOCK(&so->so_rcv);
5977 		hold_sblock = 0;
5978 	}
5979 	sbunlock(&so->so_rcv);
5980 	sockbuf_lock = 0;
5981 
5982 release_unlocked:
5983 	if (hold_sblock) {
5984 		SOCKBUF_UNLOCK(&so->so_rcv);
5985 		hold_sblock = 0;
5986 	}
5987 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5988 		if ((freed_so_far >= rwnd_req) &&
5989 		    (control && (control->do_not_ref_stcb == 0)) &&
5990 		    (no_rcv_needed == 0))
5991 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5992 	}
5993 out:
5994 	if (msg_flags) {
5995 		*msg_flags = out_flags;
5996 	}
5997 	if (((out_flags & MSG_EOR) == 0) &&
5998 	    ((in_flags & MSG_PEEK) == 0) &&
5999 	    (sinfo) &&
6000 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
6001 		struct sctp_extrcvinfo *s_extra;
6002 
6003 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6004 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6005 	}
6006 	if (hold_rlock == 1) {
6007 		SCTP_INP_READ_UNLOCK(inp);
6008 		hold_rlock = 0;
6009 	}
6010 	if (hold_sblock) {
6011 		SOCKBUF_UNLOCK(&so->so_rcv);
6012 		hold_sblock = 0;
6013 	}
6014 	if (sockbuf_lock) {
6015 		sbunlock(&so->so_rcv);
6016 	}
6017 	if (freecnt_applied) {
6018 		/*
6019 		 * The lock on the socket buffer protects us so the free
6020 		 * code will stop. But since we used the socketbuf lock and
6021 		 * the sender uses the tcb_lock to increment, we need to use
6022 		 * the atomic add to the refcnt.
6023 		 */
6024 		if (stcb == NULL) {
6025 #ifdef INVARIANTS
6026 			panic("stcb for refcnt has gone NULL?");
6027 			goto stage_left;
6028 #else
6029 			goto stage_left;
6030 #endif
6031 		}
6032 		atomic_add_int(&stcb->asoc.refcnt, -1);
6033 		freecnt_applied = 0;
6034 		/* Save the value back for next time */
6035 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6036 	}
6037 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6038 		if (stcb) {
6039 			sctp_misc_ints(SCTP_SORECV_DONE,
6040 			    freed_so_far,
6041 			    ((uio) ? (slen - uio->uio_resid) : slen),
6042 			    stcb->asoc.my_rwnd,
6043 			    so->so_rcv.sb_cc);
6044 		} else {
6045 			sctp_misc_ints(SCTP_SORECV_DONE,
6046 			    freed_so_far,
6047 			    ((uio) ? (slen - uio->uio_resid) : slen),
6048 			    0,
6049 			    so->so_rcv.sb_cc);
6050 		}
6051 	}
6052 stage_left:
6053 	if (wakeup_read_socket) {
6054 		sctp_sorwakeup(inp, so);
6055 	}
6056 	return (error);
6057 }
6058 
6059 
6060 #ifdef SCTP_MBUF_LOGGING
6061 struct mbuf *
6062 sctp_m_free(struct mbuf *m)
6063 {
6064 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6065 		if (SCTP_BUF_IS_EXTENDED(m)) {
6066 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6067 		}
6068 	}
6069 	return (m_free(m));
6070 }
6071 
6072 void
6073 sctp_m_freem(struct mbuf *mb)
6074 {
6075 	while (mb != NULL)
6076 		mb = sctp_m_free(mb);
6077 }
6078 
6079 #endif
6080 
/*
 * Given a local address, request a peer-set-primary for all
 * associations that hold that address.  The request is queued as a
 * work item on the global address work queue and the ADDR_WQ timer is
 * started so the iterator will pick it up.
 *
 * Returns 0 on success, EADDRNOTAVAIL if the address is not a local
 * interface address, or ENOMEM if the work item cannot be allocated.
 */
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address. For all associations that holds the
	 * address, request a peer-set-primary.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	/* resolve the sockaddr to one of our interface addresses */
	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and init the wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* hold a reference on the ifa for the work item */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_WQ_ADDR_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_WQ_ADDR_UNLOCK();
	/* kick the address work-queue timer so the item gets processed */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6127 
6128 
6129 int
6130 sctp_soreceive(struct socket *so,
6131     struct sockaddr **psa,
6132     struct uio *uio,
6133     struct mbuf **mp0,
6134     struct mbuf **controlp,
6135     int *flagsp)
6136 {
6137 	int error, fromlen;
6138 	uint8_t sockbuf[256];
6139 	struct sockaddr *from;
6140 	struct sctp_extrcvinfo sinfo;
6141 	int filling_sinfo = 1;
6142 	struct sctp_inpcb *inp;
6143 
6144 	inp = (struct sctp_inpcb *)so->so_pcb;
6145 	/* pickup the assoc we are reading from */
6146 	if (inp == NULL) {
6147 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6148 		return (EINVAL);
6149 	}
6150 	if ((sctp_is_feature_off(inp,
6151 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6152 	    (controlp == NULL)) {
6153 		/* user does not want the sndrcv ctl */
6154 		filling_sinfo = 0;
6155 	}
6156 	if (psa) {
6157 		from = (struct sockaddr *)sockbuf;
6158 		fromlen = sizeof(sockbuf);
6159 		from->sa_len = 0;
6160 	} else {
6161 		from = NULL;
6162 		fromlen = 0;
6163 	}
6164 
6165 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6166 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6167 	if ((controlp) && (filling_sinfo)) {
6168 		/* copy back the sinfo in a CMSG format */
6169 		if (filling_sinfo)
6170 			*controlp = sctp_build_ctl_nchunk(inp,
6171 			    (struct sctp_sndrcvinfo *)&sinfo);
6172 		else
6173 			*controlp = NULL;
6174 	}
6175 	if (psa) {
6176 		/* copy back the address info */
6177 		if (from && from->sa_len) {
6178 			*psa = sodupsockaddr(from, M_NOWAIT);
6179 		} else {
6180 			*psa = NULL;
6181 		}
6182 	}
6183 	return (error);
6184 }
6185 
6186 
6187 int
6188 sctp_l_soreceive(struct socket *so,
6189     struct sockaddr **name,
6190     struct uio *uio,
6191     char **controlp,
6192     int *controllen,
6193     int *flag)
6194 {
6195 	int error, fromlen;
6196 	uint8_t sockbuf[256];
6197 	struct sockaddr *from;
6198 	struct sctp_extrcvinfo sinfo;
6199 	int filling_sinfo = 1;
6200 	struct sctp_inpcb *inp;
6201 
6202 	inp = (struct sctp_inpcb *)so->so_pcb;
6203 	/* pickup the assoc we are reading from */
6204 	if (inp == NULL) {
6205 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6206 		return (EINVAL);
6207 	}
6208 	if ((sctp_is_feature_off(inp,
6209 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6210 	    (controlp == NULL)) {
6211 		/* user does not want the sndrcv ctl */
6212 		filling_sinfo = 0;
6213 	}
6214 	if (name) {
6215 		from = (struct sockaddr *)sockbuf;
6216 		fromlen = sizeof(sockbuf);
6217 		from->sa_len = 0;
6218 	} else {
6219 		from = NULL;
6220 		fromlen = 0;
6221 	}
6222 
6223 	error = sctp_sorecvmsg(so, uio,
6224 	    (struct mbuf **)NULL,
6225 	    from, fromlen, flag,
6226 	    (struct sctp_sndrcvinfo *)&sinfo,
6227 	    filling_sinfo);
6228 	if ((controlp) && (filling_sinfo)) {
6229 		/*
6230 		 * copy back the sinfo in a CMSG format note that the caller
6231 		 * has reponsibility for freeing the memory.
6232 		 */
6233 		if (filling_sinfo)
6234 			*controlp = sctp_build_ctl_cchunk(inp,
6235 			    controllen,
6236 			    (struct sctp_sndrcvinfo *)&sinfo);
6237 	}
6238 	if (name) {
6239 		/* copy back the address info */
6240 		if (from && from->sa_len) {
6241 			*name = sodupsockaddr(from, M_WAIT);
6242 		} else {
6243 			*name = NULL;
6244 		}
6245 	}
6246 	return (error);
6247 }
6248 
6249 
6250 
6251 
6252 
6253 
6254 
/*
 * Add each address in a packed connectx() address array as a remote
 * (peer) address of the association.  On the first failure the whole
 * association is freed, *error is set to ENOBUFS, and the number of
 * addresses added so far is returned.
 *
 * NOTE(review): an entry whose family is neither AF_INET nor AF_INET6
 * leaves 'incr' at its previous value (0 on the first iteration), so
 * the walk would not advance past it.  Presumably the caller has
 * already validated the array via sctp_connectx_helper_find(), which
 * truncates totaddr at the first unknown family -- confirm.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/* advance to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6295 
/*
 * Walk a packed connectx() address array: count the IPv4/IPv6 entries
 * into *num_v4/*num_v6 and validate each sa_len.  *totaddr is trimmed
 * when an unknown family is encountered or when the next entry would
 * exceed 'limit' bytes.  On malformed input, *error is set to EINVAL
 * and *bad_addr to 1.
 *
 * If any address already maps to an association, that stcb is returned
 * immediately -- note the inp reference taken just before the lookup is
 * intentionally NOT dropped in that case (the caller inherits it).
 * Otherwise returns NULL.
 */
struct sctp_tcb *
sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
    int *totaddr, int *num_v4, int *num_v6, int *error,
    int limit, int *bad_addr)
{
	struct sockaddr *sa;
	struct sctp_tcb *stcb = NULL;
	size_t incr, at, i;

	at = incr = 0;
	sa = addr;
	*error = *num_v6 = *num_v4 = 0;
	/* account and validate addresses */
	for (i = 0; i < (size_t)*totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			(*num_v4) += 1;
			incr = sizeof(struct sockaddr_in);
			if (sa->sa_len != incr) {
				/* sa_len must match the family's size */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
		} else if (sa->sa_family == AF_INET6) {
			struct sockaddr_in6 *sin6;

			sin6 = (struct sockaddr_in6 *)sa;
			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
				/* Must be non-mapped for connectx */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
			(*num_v6) += 1;
			incr = sizeof(struct sockaddr_in6);
			if (sa->sa_len != incr) {
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				*bad_addr = 1;
				return (NULL);
			}
		} else {
			/* unknown family terminates the list here */
			*totaddr = i;
			/* we are done */
			break;
		}
		SCTP_INP_INCR_REF(inp);
		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
		if (stcb != NULL) {
			/* Already have or am bring up an association */
			return (stcb);
		} else {
			SCTP_INP_DECR_REF(inp);
		}
		if ((at + incr) > (size_t)limit) {
			/* next entry would run past the caller's buffer */
			*totaddr = i;
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
	return ((struct sctp_tcb *)NULL);
}
6359 
6360 /*
6361  * sctp_bindx(ADD) for one address.
6362  * assumes all arguments are valid/checked by caller.
6363  */
/*
 * Validates the address family and length against the endpoint's PCB
 * flags, converts a v4-mapped IPv6 address to plain IPv4 when allowed,
 * then either performs the initial bind (unbound endpoint, needs a
 * valid proc pointer 'p') or adds the address to the already-bound
 * endpoint via sctp_addr_mgmt_ep_sa().  On failure *error is set to an
 * errno value; assoc-specific bindx (assoc_id != 0) is not implemented.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* fall back to the embedded IPv4 address */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		/* first address on an unbound endpoint: a regular bind */
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* check whether some other endpoint owns this addr:port */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* free to use: register the address with port 0 */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6485 
6486 /*
6487  * sctp_bindx(DELETE) for one address.
6488  * assumes all arguments are valid/checked by caller.
6489  */
/*
 * Mirrors sctp_bindx_add_address(): validates the address family and
 * length against the endpoint's PCB flags, converts a v4-mapped IPv6
 * address to plain IPv4 when allowed, then removes the address from
 * the endpoint via sctp_addr_mgmt_ep_sa().  On failure *error is set
 * to an errno value; assoc-specific bindx (assoc_id != 0) is not
 * implemented.
 */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* fall back to the embedded IPv4 address */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6568 
6569 /*
6570  * returns the valid local address count for an assoc, taking into account
6571  * all scoping rules
6572  */
/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	/* which families may this endpoint use? */
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			/* dual-stack v6 socket may also use v4 */
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/*
									 * bad link
									 * local
									 * address
									 */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6704 
6705 #if defined(SCTP_LOCAL_TRACE_BUF)
6706 
/*
 * Record one entry in the global circular trace log.  A slot index is
 * reserved lock-free with a compare-and-swap loop, so concurrent
 * callers each obtain a distinct slot; the entry fields themselves are
 * then filled without further synchronization.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* reserve a slot: retry until our CAS advances the shared index */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* wrap the reserved slot back to the start of the ring */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6732 
6733 #endif
6734 /* We will need to add support
6735  * to bind the ports and such here
6736  * so we can do UDP tunneling. In
6737  * the mean-time, we return error
6738  */
6739 #include <netinet/udp.h>
6740 #include <netinet/udp_var.h>
6741 #include <sys/proc.h>
6742 #ifdef INET6
6743 #include <netinet6/sctp6_var.h>
6744 #endif
6745 
6746 static void
6747 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6748 {
6749 	struct ip *iph;
6750 	struct mbuf *sp, *last;
6751 	struct udphdr *uhdr;
6752 	uint16_t port = 0, len;
6753 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6754 
6755 	/*
6756 	 * Split out the mbuf chain. Leave the IP header in m, place the
6757 	 * rest in the sp.
6758 	 */
6759 	if ((m->m_flags & M_PKTHDR) == 0) {
6760 		/* Can't handle one that is not a pkt hdr */
6761 		goto out;
6762 	}
6763 	/* pull the src port */
6764 	iph = mtod(m, struct ip *);
6765 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6766 
6767 	port = uhdr->uh_sport;
6768 	sp = m_split(m, off, M_DONTWAIT);
6769 	if (sp == NULL) {
6770 		/* Gak, drop packet, we can't do a split */
6771 		goto out;
6772 	}
6773 	if (sp->m_pkthdr.len < header_size) {
6774 		/* Gak, packet can't have an SCTP header in it - to small */
6775 		m_freem(sp);
6776 		goto out;
6777 	}
6778 	/* ok now pull up the UDP header and SCTP header together */
6779 	sp = m_pullup(sp, header_size);
6780 	if (sp == NULL) {
6781 		/* Gak pullup failed */
6782 		goto out;
6783 	}
6784 	/* trim out the UDP header */
6785 	m_adj(sp, sizeof(struct udphdr));
6786 
6787 	/* Now reconstruct the mbuf chain */
6788 	/* 1) find last one */
6789 	last = m;
6790 	while (last->m_next != NULL) {
6791 		last = last->m_next;
6792 	}
6793 	last->m_next = sp;
6794 	m->m_pkthdr.len += sp->m_pkthdr.len;
6795 	last = m;
6796 	while (last != NULL) {
6797 		last = last->m_next;
6798 	}
6799 	/* Now its ready for sctp_input or sctp6_input */
6800 	iph = mtod(m, struct ip *);
6801 	switch (iph->ip_v) {
6802 	case IPVERSION:
6803 		{
6804 			/* its IPv4 */
6805 			len = SCTP_GET_IPV4_LENGTH(iph);
6806 			len -= sizeof(struct udphdr);
6807 			SCTP_GET_IPV4_LENGTH(iph) = len;
6808 			sctp_input_with_port(m, off, port);
6809 			break;
6810 		}
6811 #ifdef INET6
6812 	case IPV6_VERSION >> 4:
6813 		{
6814 			/* its IPv6 - NOT supported */
6815 			goto out;
6816 			break;
6817 
6818 		}
6819 #endif
6820 	default:
6821 		{
6822 			m_freem(m);
6823 			break;
6824 		}
6825 	}
6826 	return;
6827 out:
6828 	m_freem(m);
6829 }
6830 
6831 void
6832 sctp_over_udp_stop(void)
6833 {
6834 	struct socket *sop;
6835 
6836 	/*
6837 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6838 	 * for writting!
6839 	 */
6840 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6841 		/* Nothing to do */
6842 		return;
6843 	}
6844 	sop = SCTP_BASE_INFO(udp_tun_socket);
6845 	soclose(sop);
6846 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6847 }
/*
 * Open the kernel UDP socket used for SCTP-over-UDP tunneling: create
 * a PF_INET datagram socket, install sctp_recv_udp_tunneled_packet()
 * as its kernel tunneling hook, and bind it to the sysctl-configured
 * port.  Returns 0 on success or an errno value (EINVAL if no port is
 * configured, EALREADY if already running).  Note the error path jumps
 * to the exit_stage_left label inside the sobind() failure branch,
 * which closes the socket via sctp_over_udp_stop().
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up we cant get the port */
exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_upd_tunneled_packet().
	 */
	return (0);
}
6901