xref: /freebsd/sys/netinet/sctputil.c (revision 050570efa79efcc9cf5adeb545f1a679c8dc377b)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_bsd_addr.h>
51 
52 
53 #ifndef KTR_SCTP
54 #define KTR_SCTP KTR_SUBSYS
55 #endif
56 
57 extern struct sctp_cc_functions sctp_cc_functions[];
58 extern struct sctp_ss_functions sctp_ss_functions[];
59 
60 void
61 sctp_sblog(struct sockbuf *sb,
62     struct sctp_tcb *stcb, int from, int incr)
63 {
64 	struct sctp_cwnd_log sctp_clog;
65 
66 	sctp_clog.x.sb.stcb = stcb;
67 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
68 	if (stcb)
69 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
70 	else
71 		sctp_clog.x.sb.stcb_sbcc = 0;
72 	sctp_clog.x.sb.incr = incr;
73 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
74 	    SCTP_LOG_EVENT_SB,
75 	    from,
76 	    sctp_clog.x.misc.log1,
77 	    sctp_clog.x.misc.log2,
78 	    sctp_clog.x.misc.log3,
79 	    sctp_clog.x.misc.log4);
80 }
81 
82 void
83 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
84 {
85 	struct sctp_cwnd_log sctp_clog;
86 
87 	sctp_clog.x.close.inp = (void *)inp;
88 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
89 	if (stcb) {
90 		sctp_clog.x.close.stcb = (void *)stcb;
91 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
92 	} else {
93 		sctp_clog.x.close.stcb = 0;
94 		sctp_clog.x.close.state = 0;
95 	}
96 	sctp_clog.x.close.loc = loc;
97 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
98 	    SCTP_LOG_EVENT_CLOSE,
99 	    0,
100 	    sctp_clog.x.misc.log1,
101 	    sctp_clog.x.misc.log2,
102 	    sctp_clog.x.misc.log3,
103 	    sctp_clog.x.misc.log4);
104 }
105 
106 
107 void
108 rto_logging(struct sctp_nets *net, int from)
109 {
110 	struct sctp_cwnd_log sctp_clog;
111 
112 	memset(&sctp_clog, 0, sizeof(sctp_clog));
113 	sctp_clog.x.rto.net = (void *)net;
114 	sctp_clog.x.rto.rtt = net->prev_rtt;
115 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
116 	    SCTP_LOG_EVENT_RTT,
117 	    from,
118 	    sctp_clog.x.misc.log1,
119 	    sctp_clog.x.misc.log2,
120 	    sctp_clog.x.misc.log3,
121 	    sctp_clog.x.misc.log4);
122 
123 }
124 
125 void
126 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
127 {
128 	struct sctp_cwnd_log sctp_clog;
129 
130 	sctp_clog.x.strlog.stcb = stcb;
131 	sctp_clog.x.strlog.n_tsn = tsn;
132 	sctp_clog.x.strlog.n_sseq = sseq;
133 	sctp_clog.x.strlog.e_tsn = 0;
134 	sctp_clog.x.strlog.e_sseq = 0;
135 	sctp_clog.x.strlog.strm = stream;
136 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
137 	    SCTP_LOG_EVENT_STRM,
138 	    from,
139 	    sctp_clog.x.misc.log1,
140 	    sctp_clog.x.misc.log2,
141 	    sctp_clog.x.misc.log3,
142 	    sctp_clog.x.misc.log4);
143 
144 }
145 
146 void
147 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
148 {
149 	struct sctp_cwnd_log sctp_clog;
150 
151 	sctp_clog.x.nagle.stcb = (void *)stcb;
152 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
153 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
154 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
155 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
156 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
157 	    SCTP_LOG_EVENT_NAGLE,
158 	    action,
159 	    sctp_clog.x.misc.log1,
160 	    sctp_clog.x.misc.log2,
161 	    sctp_clog.x.misc.log3,
162 	    sctp_clog.x.misc.log4);
163 }
164 
165 
166 void
167 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
168 {
169 	struct sctp_cwnd_log sctp_clog;
170 
171 	sctp_clog.x.sack.cumack = cumack;
172 	sctp_clog.x.sack.oldcumack = old_cumack;
173 	sctp_clog.x.sack.tsn = tsn;
174 	sctp_clog.x.sack.numGaps = gaps;
175 	sctp_clog.x.sack.numDups = dups;
176 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
177 	    SCTP_LOG_EVENT_SACK,
178 	    from,
179 	    sctp_clog.x.misc.log1,
180 	    sctp_clog.x.misc.log2,
181 	    sctp_clog.x.misc.log3,
182 	    sctp_clog.x.misc.log4);
183 }
184 
185 void
186 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
187 {
188 	struct sctp_cwnd_log sctp_clog;
189 
190 	memset(&sctp_clog, 0, sizeof(sctp_clog));
191 	sctp_clog.x.map.base = map;
192 	sctp_clog.x.map.cum = cum;
193 	sctp_clog.x.map.high = high;
194 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
195 	    SCTP_LOG_EVENT_MAP,
196 	    from,
197 	    sctp_clog.x.misc.log1,
198 	    sctp_clog.x.misc.log2,
199 	    sctp_clog.x.misc.log3,
200 	    sctp_clog.x.misc.log4);
201 }
202 
203 void
204 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
205     int from)
206 {
207 	struct sctp_cwnd_log sctp_clog;
208 
209 	memset(&sctp_clog, 0, sizeof(sctp_clog));
210 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
211 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
212 	sctp_clog.x.fr.tsn = tsn;
213 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
214 	    SCTP_LOG_EVENT_FR,
215 	    from,
216 	    sctp_clog.x.misc.log1,
217 	    sctp_clog.x.misc.log2,
218 	    sctp_clog.x.misc.log3,
219 	    sctp_clog.x.misc.log4);
220 
221 }
222 
223 
224 void
225 sctp_log_mb(struct mbuf *m, int from)
226 {
227 	struct sctp_cwnd_log sctp_clog;
228 
229 	sctp_clog.x.mb.mp = m;
230 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
231 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
232 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
233 	if (SCTP_BUF_IS_EXTENDED(m)) {
234 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
235 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
236 	} else {
237 		sctp_clog.x.mb.ext = 0;
238 		sctp_clog.x.mb.refcnt = 0;
239 	}
240 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
241 	    SCTP_LOG_EVENT_MBUF,
242 	    from,
243 	    sctp_clog.x.misc.log1,
244 	    sctp_clog.x.misc.log2,
245 	    sctp_clog.x.misc.log3,
246 	    sctp_clog.x.misc.log4);
247 }
248 
249 
250 void
251 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
252     int from)
253 {
254 	struct sctp_cwnd_log sctp_clog;
255 
256 	if (control == NULL) {
257 		SCTP_PRINTF("Gak log of NULL?\n");
258 		return;
259 	}
260 	sctp_clog.x.strlog.stcb = control->stcb;
261 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
262 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
263 	sctp_clog.x.strlog.strm = control->sinfo_stream;
264 	if (poschk != NULL) {
265 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
266 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
267 	} else {
268 		sctp_clog.x.strlog.e_tsn = 0;
269 		sctp_clog.x.strlog.e_sseq = 0;
270 	}
271 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
272 	    SCTP_LOG_EVENT_STRM,
273 	    from,
274 	    sctp_clog.x.misc.log1,
275 	    sctp_clog.x.misc.log2,
276 	    sctp_clog.x.misc.log3,
277 	    sctp_clog.x.misc.log4);
278 
279 }
280 
281 void
282 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
283 {
284 	struct sctp_cwnd_log sctp_clog;
285 
286 	sctp_clog.x.cwnd.net = net;
287 	if (stcb->asoc.send_queue_cnt > 255)
288 		sctp_clog.x.cwnd.cnt_in_send = 255;
289 	else
290 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
291 	if (stcb->asoc.stream_queue_cnt > 255)
292 		sctp_clog.x.cwnd.cnt_in_str = 255;
293 	else
294 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
295 
296 	if (net) {
297 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
298 		sctp_clog.x.cwnd.inflight = net->flight_size;
299 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
300 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
301 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
302 	}
303 	if (SCTP_CWNDLOG_PRESEND == from) {
304 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
305 	}
306 	sctp_clog.x.cwnd.cwnd_augment = augment;
307 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
308 	    SCTP_LOG_EVENT_CWND,
309 	    from,
310 	    sctp_clog.x.misc.log1,
311 	    sctp_clog.x.misc.log2,
312 	    sctp_clog.x.misc.log3,
313 	    sctp_clog.x.misc.log4);
314 
315 }
316 
/*
 * Trace the ownership state of every lock relevant to this inp/stcb pair.
 * Either pointer may be NULL; missing locks are recorded as
 * SCTP_LOCK_UNKNOWN.  Used to debug lock-ordering problems.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	/* Global endpoint-info lock: only write ownership is checked. */
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		/*
		 * NOTE(review): sock_lock and sockrcvbuf_lock both probe
		 * so_rcv.sb_mtx — presumably because the FreeBSD socket
		 * lock aliases the receive-buffer mutex; confirm against
		 * the socket layer before changing.
		 */
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);

}
361 
362 void
363 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
364 {
365 	struct sctp_cwnd_log sctp_clog;
366 
367 	memset(&sctp_clog, 0, sizeof(sctp_clog));
368 	sctp_clog.x.cwnd.net = net;
369 	sctp_clog.x.cwnd.cwnd_new_value = error;
370 	sctp_clog.x.cwnd.inflight = net->flight_size;
371 	sctp_clog.x.cwnd.cwnd_augment = burst;
372 	if (stcb->asoc.send_queue_cnt > 255)
373 		sctp_clog.x.cwnd.cnt_in_send = 255;
374 	else
375 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
376 	if (stcb->asoc.stream_queue_cnt > 255)
377 		sctp_clog.x.cwnd.cnt_in_str = 255;
378 	else
379 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
380 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
381 	    SCTP_LOG_EVENT_MAXBURST,
382 	    from,
383 	    sctp_clog.x.misc.log1,
384 	    sctp_clog.x.misc.log2,
385 	    sctp_clog.x.misc.log3,
386 	    sctp_clog.x.misc.log4);
387 
388 }
389 
390 void
391 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
392 {
393 	struct sctp_cwnd_log sctp_clog;
394 
395 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
396 	sctp_clog.x.rwnd.send_size = snd_size;
397 	sctp_clog.x.rwnd.overhead = overhead;
398 	sctp_clog.x.rwnd.new_rwnd = 0;
399 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
400 	    SCTP_LOG_EVENT_RWND,
401 	    from,
402 	    sctp_clog.x.misc.log1,
403 	    sctp_clog.x.misc.log2,
404 	    sctp_clog.x.misc.log3,
405 	    sctp_clog.x.misc.log4);
406 }
407 
408 void
409 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
410 {
411 	struct sctp_cwnd_log sctp_clog;
412 
413 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
414 	sctp_clog.x.rwnd.send_size = flight_size;
415 	sctp_clog.x.rwnd.overhead = overhead;
416 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
417 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
418 	    SCTP_LOG_EVENT_RWND,
419 	    from,
420 	    sctp_clog.x.misc.log1,
421 	    sctp_clog.x.misc.log2,
422 	    sctp_clog.x.misc.log3,
423 	    sctp_clog.x.misc.log4);
424 }
425 
426 void
427 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
428 {
429 	struct sctp_cwnd_log sctp_clog;
430 
431 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
432 	sctp_clog.x.mbcnt.size_change = book;
433 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
434 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
435 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
436 	    SCTP_LOG_EVENT_MBCNT,
437 	    from,
438 	    sctp_clog.x.misc.log1,
439 	    sctp_clog.x.misc.log2,
440 	    sctp_clog.x.misc.log3,
441 	    sctp_clog.x.misc.log4);
442 
443 }
444 
/* Emit four caller-chosen words as a generic "misc" trace record. */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
453 
454 void
455 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
456 {
457 	struct sctp_cwnd_log sctp_clog;
458 
459 	sctp_clog.x.wake.stcb = (void *)stcb;
460 	sctp_clog.x.wake.wake_cnt = wake_cnt;
461 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
462 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
463 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
464 
465 	if (stcb->asoc.stream_queue_cnt < 0xff)
466 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
467 	else
468 		sctp_clog.x.wake.stream_qcnt = 0xff;
469 
470 	if (stcb->asoc.chunks_on_out_queue < 0xff)
471 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
472 	else
473 		sctp_clog.x.wake.chunks_on_oque = 0xff;
474 
475 	sctp_clog.x.wake.sctpflags = 0;
476 	/* set in the defered mode stuff */
477 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
478 		sctp_clog.x.wake.sctpflags |= 1;
479 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
480 		sctp_clog.x.wake.sctpflags |= 2;
481 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
482 		sctp_clog.x.wake.sctpflags |= 4;
483 	/* what about the sb */
484 	if (stcb->sctp_socket) {
485 		struct socket *so = stcb->sctp_socket;
486 
487 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
488 	} else {
489 		sctp_clog.x.wake.sbflags = 0xff;
490 	}
491 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
492 	    SCTP_LOG_EVENT_WAKE,
493 	    from,
494 	    sctp_clog.x.misc.log1,
495 	    sctp_clog.x.misc.log2,
496 	    sctp_clog.x.misc.log3,
497 	    sctp_clog.x.misc.log4);
498 
499 }
500 
501 void
502 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
503 {
504 	struct sctp_cwnd_log sctp_clog;
505 
506 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
507 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
508 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
509 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
510 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
511 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
512 	sctp_clog.x.blk.sndlen = sendlen;
513 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
514 	    SCTP_LOG_EVENT_BLOCK,
515 	    from,
516 	    sctp_clog.x.misc.log1,
517 	    sctp_clog.x.misc.log2,
518 	    sctp_clog.x.misc.log3,
519 	    sctp_clog.x.misc.log4);
520 
521 }
522 
/*
 * Stub: trace records are harvested through the kernel tracing facility
 * (ktrdump) rather than copied out here, so this always reports success.
 * May need to fix this if ktrdump does not work.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	return (0);
}
529 
530 #ifdef SCTP_AUDITING_ENABLED
531 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
532 static int sctp_audit_indx = 0;
533 
534 static
535 void
536 sctp_print_audit_report(void)
537 {
538 	int i;
539 	int cnt;
540 
541 	cnt = 0;
542 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
543 		if ((sctp_audit_data[i][0] == 0xe0) &&
544 		    (sctp_audit_data[i][1] == 0x01)) {
545 			cnt = 0;
546 			SCTP_PRINTF("\n");
547 		} else if (sctp_audit_data[i][0] == 0xf0) {
548 			cnt = 0;
549 			SCTP_PRINTF("\n");
550 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
551 		    (sctp_audit_data[i][1] == 0x01)) {
552 			SCTP_PRINTF("\n");
553 			cnt = 0;
554 		}
555 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
556 		    (uint32_t) sctp_audit_data[i][1]);
557 		cnt++;
558 		if ((cnt % 14) == 0)
559 			SCTP_PRINTF("\n");
560 	}
561 	for (i = 0; i < sctp_audit_indx; i++) {
562 		if ((sctp_audit_data[i][0] == 0xe0) &&
563 		    (sctp_audit_data[i][1] == 0x01)) {
564 			cnt = 0;
565 			SCTP_PRINTF("\n");
566 		} else if (sctp_audit_data[i][0] == 0xf0) {
567 			cnt = 0;
568 			SCTP_PRINTF("\n");
569 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
570 		    (sctp_audit_data[i][1] == 0x01)) {
571 			SCTP_PRINTF("\n");
572 			cnt = 0;
573 		}
574 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
575 		    (uint32_t) sctp_audit_data[i][1]);
576 		cnt++;
577 		if ((cnt % 14) == 0)
578 			SCTP_PRINTF("\n");
579 	}
580 	SCTP_PRINTF("\n");
581 }
582 
/*
 * Consistency audit of an association's retransmit and flight-size
 * accounting.  Each step stamps a marker pair into the audit ring; on
 * any mismatch the cached counter is corrected to the recomputed value
 * and a report is printed.  'from' identifies the call site; 'net' is
 * currently unused.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	/* 0xAA/<from>: start-of-audit marker. */
	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		/* 0xAF/0x01: bailed out, no endpoint. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		/* 0xAF/0x02: bailed out, no association. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	/* 0xA1/<cnt>: cached retransmit count before recomputation. */
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/*
	 * Recompute from the sent queue: chunks marked RESEND count
	 * toward retransmits; chunks below RESEND are still in flight.
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		/* 0xAF/0xA1: retransmit-count mismatch; correcting. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		/* 0xA2/<cnt>: corrected retransmit count. */
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA2: total-flight (bytes) mismatch; correcting. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		/* 0xAF/0xA5: in-flight chunk-count mismatch; correcting. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	/* Cross-check: per-net flight sizes must sum to the asoc total. */
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		/* 0xAF/0xA3: per-net sum disagrees with asoc total. */
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}
712 
713 void
714 sctp_audit_log(uint8_t ev, uint8_t fd)
715 {
716 
717 	sctp_audit_data[sctp_audit_indx][0] = ev;
718 	sctp_audit_data[sctp_audit_indx][1] = fd;
719 	sctp_audit_indx++;
720 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
721 		sctp_audit_indx = 0;
722 	}
723 }
724 
725 #endif
726 
727 /*
728  * sctp_stop_timers_for_shutdown() should be called
729  * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
730  * state to make sure that all timers are stopped.
731  */
732 void
733 sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
734 {
735 	struct sctp_association *asoc;
736 	struct sctp_nets *net;
737 
738 	asoc = &stcb->asoc;
739 
740 	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
741 	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
742 	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
743 	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
744 	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
745 	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
746 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
747 		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
748 		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
749 	}
750 }
751 
752 /*
753  * a list of sizes based on typical mtu's, used only if next hop size not
754  * returned.
755  */
/*
 * Entries must stay sorted in ascending order: sctp_get_prev_mtu() and
 * sctp_get_next_mtu() do linear scans that rely on it.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
776 
777 /*
778  * Return the largest MTU smaller than val. If there is no
779  * entry, just return val.
780  */
781 uint32_t
782 sctp_get_prev_mtu(uint32_t val)
783 {
784 	uint32_t i;
785 
786 	if (val <= sctp_mtu_sizes[0]) {
787 		return (val);
788 	}
789 	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
790 		if (val <= sctp_mtu_sizes[i]) {
791 			break;
792 		}
793 	}
794 	return (sctp_mtu_sizes[i - 1]);
795 }
796 
797 /*
798  * Return the smallest MTU larger than val. If there is no
799  * entry, just return val.
800  */
801 uint32_t
802 sctp_get_next_mtu(struct sctp_inpcb *inp, uint32_t val)
803 {
804 	/* select another MTU that is just bigger than this one */
805 	uint32_t i;
806 
807 	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
808 		if (val < sctp_mtu_sizes[i]) {
809 			return (sctp_mtu_sizes[i]);
810 		}
811 	}
812 	return (val);
813 }
814 
/*
 * Refill the PCB's random-number store by HMACing the seed material
 * with a monotonically increasing counter, then reset the read cursor.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}
833 
/*
 * Draw a 32-bit value from the endpoint's random store.  The store is
 * consumed 4 bytes at a time through a lock-free cursor; when the
 * cursor wraps the store is refilled.  A nonzero
 * initial_sequence_debug gives deterministic, incrementing values for
 * debugging instead.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/*
	 * Claim a 4-byte slot by advancing the cursor with a CAS; on
	 * contention another thread moved it first, so try again.
	 */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
871 
872 uint32_t
873 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
874 {
875 	uint32_t x, not_done;
876 	struct timeval now;
877 
878 	(void)SCTP_GETTIME_TIMEVAL(&now);
879 	not_done = 1;
880 	while (not_done) {
881 		x = sctp_select_initial_TSN(&inp->sctp_ep);
882 		if (x == 0) {
883 			/* we never use 0 */
884 			continue;
885 		}
886 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
887 			not_done = 0;
888 		}
889 	}
890 	return (x);
891 }
892 
893 int
894 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
895     uint32_t override_tag, uint32_t vrf_id)
896 {
897 	struct sctp_association *asoc;
898 
899 	/*
900 	 * Anything set to zero is taken care of by the allocation routine's
901 	 * bzero
902 	 */
903 
904 	/*
905 	 * Up front select what scoping to apply on addresses I tell my peer
906 	 * Not sure what to do with these right now, we will need to come up
907 	 * with a way to set them. We may need to pass them through from the
908 	 * caller in the sctp_aloc_assoc() function.
909 	 */
910 	int i;
911 
912 	asoc = &stcb->asoc;
913 	/* init all variables to a known value. */
914 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
915 	asoc->max_burst = m->sctp_ep.max_burst;
916 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
917 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
918 	asoc->sctp_cmt_on_off = m->sctp_cmt_on_off;
919 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
920 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
921 	asoc->sctp_frag_point = m->sctp_frag_point;
922 #ifdef INET
923 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
924 #else
925 	asoc->default_tos = 0;
926 #endif
927 
928 #ifdef INET6
929 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
930 #else
931 	asoc->default_flowlabel = 0;
932 #endif
933 	asoc->sb_send_resv = 0;
934 	if (override_tag) {
935 		asoc->my_vtag = override_tag;
936 	} else {
937 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
938 	}
939 	/* Get the nonce tags */
940 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
941 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
942 	asoc->vrf_id = vrf_id;
943 
944 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
945 		asoc->hb_is_disabled = 1;
946 	else
947 		asoc->hb_is_disabled = 0;
948 
949 #ifdef SCTP_ASOCLOG_OF_TSNS
950 	asoc->tsn_in_at = 0;
951 	asoc->tsn_out_at = 0;
952 	asoc->tsn_in_wrapped = 0;
953 	asoc->tsn_out_wrapped = 0;
954 	asoc->cumack_log_at = 0;
955 	asoc->cumack_log_atsnt = 0;
956 #endif
957 #ifdef SCTP_FS_SPEC_LOG
958 	asoc->fs_index = 0;
959 #endif
960 	asoc->refcnt = 0;
961 	asoc->assoc_up_sent = 0;
962 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
963 	    sctp_select_initial_TSN(&m->sctp_ep);
964 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
965 	/* we are optimisitic here */
966 	asoc->peer_supports_pktdrop = 1;
967 	asoc->peer_supports_nat = 0;
968 	asoc->sent_queue_retran_cnt = 0;
969 
970 	/* for CMT */
971 	asoc->last_net_cmt_send_started = NULL;
972 
973 	/* This will need to be adjusted */
974 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
975 	asoc->last_acked_seq = asoc->init_seq_number - 1;
976 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
977 	asoc->asconf_seq_in = asoc->last_acked_seq;
978 
979 	/* here we are different, we hold the next one we expect */
980 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
981 
982 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
983 	asoc->initial_rto = m->sctp_ep.initial_rto;
984 
985 	asoc->max_init_times = m->sctp_ep.max_init_times;
986 	asoc->max_send_times = m->sctp_ep.max_send_times;
987 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
988 	asoc->free_chunk_cnt = 0;
989 
990 	asoc->iam_blocking = 0;
991 	/* ECN Nonce initialization */
992 	asoc->context = m->sctp_context;
993 	asoc->def_send = m->def_send;
994 	asoc->ecn_nonce_allowed = 0;
995 	asoc->receiver_nonce_sum = 1;
996 	asoc->nonce_sum_expect_base = 1;
997 	asoc->nonce_sum_check = 1;
998 	asoc->nonce_resync_tsn = 0;
999 	asoc->nonce_wait_for_ecne = 0;
1000 	asoc->nonce_wait_tsn = 0;
1001 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1002 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
1003 	asoc->pr_sctp_cnt = 0;
1004 	asoc->total_output_queue_size = 0;
1005 
1006 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1007 		struct in6pcb *inp6;
1008 
1009 		/* Its a V6 socket */
1010 		inp6 = (struct in6pcb *)m;
1011 		asoc->ipv6_addr_legal = 1;
1012 		/* Now look at the binding flag to see if V4 will be legal */
1013 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
1014 			asoc->ipv4_addr_legal = 1;
1015 		} else {
1016 			/* V4 addresses are NOT legal on the association */
1017 			asoc->ipv4_addr_legal = 0;
1018 		}
1019 	} else {
1020 		/* Its a V4 socket, no - V6 */
1021 		asoc->ipv4_addr_legal = 1;
1022 		asoc->ipv6_addr_legal = 0;
1023 	}
1024 
1025 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1026 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1027 
1028 	asoc->smallest_mtu = m->sctp_frag_point;
1029 	asoc->minrto = m->sctp_ep.sctp_minrto;
1030 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1031 
1032 	asoc->locked_on_sending = NULL;
1033 	asoc->stream_locked_on = 0;
1034 	asoc->ecn_echo_cnt_onq = 0;
1035 	asoc->stream_locked = 0;
1036 
1037 	asoc->send_sack = 1;
1038 
1039 	LIST_INIT(&asoc->sctp_restricted_addrs);
1040 
1041 	TAILQ_INIT(&asoc->nets);
1042 	TAILQ_INIT(&asoc->pending_reply_queue);
1043 	TAILQ_INIT(&asoc->asconf_ack_sent);
1044 	/* Setup to fill the hb random cache at first HB */
1045 	asoc->hb_random_idx = 4;
1046 
1047 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1048 
1049 	stcb->asoc.congestion_control_module = m->sctp_ep.sctp_default_cc_module;
1050 	stcb->asoc.cc_functions = sctp_cc_functions[m->sctp_ep.sctp_default_cc_module];
1051 
1052 	stcb->asoc.stream_scheduling_module = m->sctp_ep.sctp_default_ss_module;
1053 	stcb->asoc.ss_functions = sctp_ss_functions[m->sctp_ep.sctp_default_ss_module];
1054 
1055 	/*
1056 	 * Now the stream parameters, here we allocate space for all streams
1057 	 * that we request by default.
1058 	 */
1059 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1060 	    m->sctp_ep.pre_open_stream_count;
1061 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1062 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1063 	    SCTP_M_STRMO);
1064 	if (asoc->strmout == NULL) {
1065 		/* big trouble no memory */
1066 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1067 		return (ENOMEM);
1068 	}
1069 	for (i = 0; i < asoc->streamoutcnt; i++) {
1070 		/*
1071 		 * inbound side must be set to 0xffff, also NOTE when we get
1072 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1073 		 * count (streamoutcnt) but first check if we sent to any of
1074 		 * the upper streams that were dropped (if some were). Those
1075 		 * that were dropped must be notified to the upper layer as
1076 		 * failed to send.
1077 		 */
1078 		asoc->strmout[i].next_sequence_sent = 0x0;
1079 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1080 		asoc->strmout[i].stream_no = i;
1081 		asoc->strmout[i].last_msg_incomplete = 0;
1082 		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i]);
1083 	}
1084 	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);
1085 
1086 	/* Now the mapping array */
1087 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1088 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1089 	    SCTP_M_MAP);
1090 	if (asoc->mapping_array == NULL) {
1091 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1092 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1093 		return (ENOMEM);
1094 	}
1095 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1096 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
1097 	    SCTP_M_MAP);
1098 	if (asoc->nr_mapping_array == NULL) {
1099 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1100 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1101 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1102 		return (ENOMEM);
1103 	}
1104 	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
1105 
1106 	/* Now the init of the other outqueues */
1107 	TAILQ_INIT(&asoc->free_chunks);
1108 	TAILQ_INIT(&asoc->control_send_queue);
1109 	TAILQ_INIT(&asoc->asconf_send_queue);
1110 	TAILQ_INIT(&asoc->send_queue);
1111 	TAILQ_INIT(&asoc->sent_queue);
1112 	TAILQ_INIT(&asoc->reasmqueue);
1113 	TAILQ_INIT(&asoc->resetHead);
1114 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1115 	TAILQ_INIT(&asoc->asconf_queue);
1116 	/* authentication fields */
1117 	asoc->authinfo.random = NULL;
1118 	asoc->authinfo.active_keyid = 0;
1119 	asoc->authinfo.assoc_key = NULL;
1120 	asoc->authinfo.assoc_keyid = 0;
1121 	asoc->authinfo.recv_key = NULL;
1122 	asoc->authinfo.recv_keyid = 0;
1123 	LIST_INIT(&asoc->shared_keys);
1124 	asoc->marked_retrans = 0;
1125 	asoc->timoinit = 0;
1126 	asoc->timodata = 0;
1127 	asoc->timosack = 0;
1128 	asoc->timoshutdown = 0;
1129 	asoc->timoheartbeat = 0;
1130 	asoc->timocookie = 0;
1131 	asoc->timoshutdownack = 0;
1132 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1133 	asoc->discontinuity_time = asoc->start_time;
1134 	/*
1135 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1136 	 * freed later when the association is freed.
1137 	 */
1138 	return (0);
1139 }
1140 
1141 void
1142 sctp_print_mapping_array(struct sctp_association *asoc)
1143 {
1144 	unsigned int i, limit;
1145 
1146 	printf("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
1147 	    asoc->mapping_array_size,
1148 	    asoc->mapping_array_base_tsn,
1149 	    asoc->cumulative_tsn,
1150 	    asoc->highest_tsn_inside_map,
1151 	    asoc->highest_tsn_inside_nr_map);
1152 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1153 		if (asoc->mapping_array[limit - 1]) {
1154 			break;
1155 		}
1156 	}
1157 	printf("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1158 	for (i = 0; i < limit; i++) {
1159 		printf("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1160 	}
1161 	if (limit % 16)
1162 		printf("\n");
1163 	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
1164 		if (asoc->nr_mapping_array[limit - 1]) {
1165 			break;
1166 		}
1167 	}
1168 	printf("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
1169 	for (i = 0; i < limit; i++) {
1170 		printf("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
1171 	}
1172 	if (limit % 16)
1173 		printf("\n");
1174 }
1175 
1176 int
1177 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1178 {
1179 	/* mapping array needs to grow */
1180 	uint8_t *new_array1, *new_array2;
1181 	uint32_t new_size;
1182 
1183 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1184 	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
1185 	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
1186 	if ((new_array1 == NULL) || (new_array2 == NULL)) {
1187 		/* can't get more, forget it */
1188 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
1189 		if (new_array1) {
1190 			SCTP_FREE(new_array1, SCTP_M_MAP);
1191 		}
1192 		if (new_array2) {
1193 			SCTP_FREE(new_array2, SCTP_M_MAP);
1194 		}
1195 		return (-1);
1196 	}
1197 	memset(new_array1, 0, new_size);
1198 	memset(new_array2, 0, new_size);
1199 	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
1200 	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
1201 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1202 	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1203 	asoc->mapping_array = new_array1;
1204 	asoc->nr_mapping_array = new_array2;
1205 	asoc->mapping_array_size = new_size;
1206 	return (0);
1207 }
1208 
1209 
/*
 * Core of the asynchronous PCB iterator.  Walks every endpoint (inp) that
 * matches the iterator's pcb_flags/pcb_features filters and, within each,
 * every association (stcb) matching asoc_state, invoking the registered
 * callbacks: function_inp once per endpoint, function_assoc once per
 * association, function_inp_end when an endpoint is finished, and
 * function_atend when the whole iteration completes.  The iterator
 * structure is freed here on completion; callers must not touch "it"
 * after this returns.
 *
 * Locking: runs under the INP-INFO read lock and the global ITERATOR
 * lock, taking per-inp read locks and per-stcb locks as it descends.
 * After SCTP_ITERATOR_MAX_AT_ONCE associations it briefly drops and
 * re-acquires the global locks to avoid starving other threads.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		/* Convert the caller's reference into our read lock. */
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		/* The iterator owns its storage; release it on completion. */
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* The very first endpoint is already read-locked above. */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	/* Skip endpoints whose flags or features fail the filters. */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		/* Hand-over-hand: lock the next inp before dropping this one's. */
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		/* Endpoint callback asked to skip, or no associations. */
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/*
			 * Pin the stcb with a refcount and the inp with a
			 * reference while all locks are briefly dropped.
			 */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			/* Someone may have signalled us while we were away. */
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_MUST_EXIT) {
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				printf("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/* Re-take our locks and drop the temporary holds. */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1361 
/*
 * Drain the global iterator work queue: dequeue each pending iterator and
 * run it to completion via sctp_iterator_work() (which also frees it).
 * The WQ lock is dropped around each iterator's execution and re-taken
 * before touching the queue again.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		/* Runs the iterator and frees "it"; do not touch it after. */
		sctp_iterator_work(it);
		/*
		 * NOTE(review): cur_it still points at the freed iterator
		 * unless we break out below — presumably readers check
		 * iterator_running first; verify before relying on cur_it.
		 */
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		if (sctp_it_ctl.iterator_flags & SCTP_ITERATOR_MUST_EXIT) {
			sctp_it_ctl.cur_it = NULL;
			break;
		}
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}
1389 
1390 
1391 static void
1392 sctp_handle_addr_wq(void)
1393 {
1394 	/* deal with the ADDR wq from the rtsock calls */
1395 	struct sctp_laddr *wi, *nwi;
1396 	struct sctp_asconf_iterator *asc;
1397 
1398 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1399 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1400 	if (asc == NULL) {
1401 		/* Try later, no memory */
1402 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1403 		    (struct sctp_inpcb *)NULL,
1404 		    (struct sctp_tcb *)NULL,
1405 		    (struct sctp_nets *)NULL);
1406 		return;
1407 	}
1408 	LIST_INIT(&asc->list_of_work);
1409 	asc->cnt = 0;
1410 
1411 	SCTP_WQ_ADDR_LOCK();
1412 	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
1413 		LIST_REMOVE(wi, sctp_nxt_addr);
1414 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1415 		asc->cnt++;
1416 	}
1417 	SCTP_WQ_ADDR_UNLOCK();
1418 
1419 	if (asc->cnt == 0) {
1420 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1421 	} else {
1422 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1423 		    sctp_asconf_iterator_stcb,
1424 		    NULL,	/* No ep end for boundall */
1425 		    SCTP_PCB_FLAGS_BOUNDALL,
1426 		    SCTP_PCB_ANY_FEATURES,
1427 		    SCTP_ASOC_ANY_STATE,
1428 		    (void *)asc, 0,
1429 		    sctp_asconf_iterator_end, NULL, 0);
1430 	}
1431 }
1432 
/*
 * NOTE(review): file-scope scratch variables written from
 * sctp_timeout_handler() (T3 retransmit case).  Being globals, they are
 * shared by every timer invocation and are racy if two timers fire
 * concurrently; they look like they should be locals.  Presumably nothing
 * outside this file references them — verify before narrowing scope.
 */
int retcode = 0;
int cur_oerr = 0;
1435 
/*
 * Central SCTP timer callback: dispatches an expired sctp_timer to the
 * handler for its type (T3 retransmit, INIT, delayed SACK, heartbeat,
 * shutdown, cookie, ASCONF, autoclose, kill timers, etc.).
 *
 * Before dispatching it performs a series of staleness/validity checks,
 * recording progress in tmr->stopped_from for post-mortem debugging.
 * It takes an inp reference and the stcb lock (when present) for the
 * duration; most per-type handlers return non-zero when they have torn
 * the association down, in which case we must NOT unlock the (now freed)
 * tcb and jump straight to out_decr.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	/* Self-pointer mismatch means the timer struct is stale. */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* Only the ADDR_WQ timer may legitimately have no endpoint. */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * A socket-less endpoint only services the listed timer
		 * types; everything else is dropped.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* Pin the association; state 0 means it is already dead. */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/* Timer was stopped while this callback was in flight. */
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* Trade our refcount for the tcb lock. */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* ASOCKILL must run even on an about-to-be-freed assoc. */
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what timeout occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Drain the global address work queue. */
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there are some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* Delayed SACK timer. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* Count reachable-but-unconfirmed destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			/* Re-arm for the next heartbeat round. */
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* Rotate the endpoint's secret key material. */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/* Guard expired: abort the association outright. */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* Socket lock must be taken before the tcb lock here. */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	/* Release the reference taken near the top. */
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1900 
1901 void
1902 sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1903     struct sctp_nets *net)
1904 {
1905 	int to_ticks;
1906 	struct sctp_timer *tmr;
1907 
1908 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
1909 		return;
1910 
1911 	to_ticks = 0;
1912 
1913 	tmr = NULL;
1914 	if (stcb) {
1915 		SCTP_TCB_LOCK_ASSERT(stcb);
1916 	}
1917 	switch (t_type) {
1918 	case SCTP_TIMER_TYPE_ZERO_COPY:
1919 		tmr = &inp->sctp_ep.zero_copy_timer;
1920 		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
1921 		break;
1922 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
1923 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
1924 		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
1925 		break;
1926 	case SCTP_TIMER_TYPE_ADDR_WQ:
1927 		/* Only 1 tick away :-) */
1928 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
1929 		to_ticks = SCTP_ADDRESS_TICK_DELAY;
1930 		break;
1931 	case SCTP_TIMER_TYPE_SEND:
1932 		/* Here we use the RTO timer */
1933 		{
1934 			int rto_val;
1935 
1936 			if ((stcb == NULL) || (net == NULL)) {
1937 				return;
1938 			}
1939 			tmr = &net->rxt_timer;
1940 			if (net->RTO == 0) {
1941 				rto_val = stcb->asoc.initial_rto;
1942 			} else {
1943 				rto_val = net->RTO;
1944 			}
1945 			to_ticks = MSEC_TO_TICKS(rto_val);
1946 		}
1947 		break;
1948 	case SCTP_TIMER_TYPE_INIT:
1949 		/*
1950 		 * Here we use the INIT timer default usually about 1
1951 		 * minute.
1952 		 */
1953 		if ((stcb == NULL) || (net == NULL)) {
1954 			return;
1955 		}
1956 		tmr = &net->rxt_timer;
1957 		if (net->RTO == 0) {
1958 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1959 		} else {
1960 			to_ticks = MSEC_TO_TICKS(net->RTO);
1961 		}
1962 		break;
1963 	case SCTP_TIMER_TYPE_RECV:
1964 		/*
1965 		 * Here we use the Delayed-Ack timer value from the inp
1966 		 * ususually about 200ms.
1967 		 */
1968 		if (stcb == NULL) {
1969 			return;
1970 		}
1971 		tmr = &stcb->asoc.dack_timer;
1972 		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
1973 		break;
1974 	case SCTP_TIMER_TYPE_SHUTDOWN:
1975 		/* Here we use the RTO of the destination. */
1976 		if ((stcb == NULL) || (net == NULL)) {
1977 			return;
1978 		}
1979 		if (net->RTO == 0) {
1980 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
1981 		} else {
1982 			to_ticks = MSEC_TO_TICKS(net->RTO);
1983 		}
1984 		tmr = &net->rxt_timer;
1985 		break;
1986 	case SCTP_TIMER_TYPE_HEARTBEAT:
1987 		/*
1988 		 * the net is used here so that we can add in the RTO. Even
1989 		 * though we use a different timer. We also add the HB timer
1990 		 * PLUS a random jitter.
1991 		 */
1992 		if ((inp == NULL) || (stcb == NULL)) {
1993 			return;
1994 		} else {
1995 			uint32_t rndval;
1996 			uint8_t this_random;
1997 			int cnt_of_unconf = 0;
1998 			struct sctp_nets *lnet;
1999 
2000 			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2001 				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2002 				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2003 					cnt_of_unconf++;
2004 				}
2005 			}
2006 			if (cnt_of_unconf) {
2007 				net = lnet = NULL;
2008 				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
2009 			}
2010 			if (stcb->asoc.hb_random_idx > 3) {
2011 				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
2012 				memcpy(stcb->asoc.hb_random_values, &rndval,
2013 				    sizeof(stcb->asoc.hb_random_values));
2014 				stcb->asoc.hb_random_idx = 0;
2015 			}
2016 			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
2017 			stcb->asoc.hb_random_idx++;
2018 			stcb->asoc.hb_ect_randombit = 0;
2019 			/*
2020 			 * this_random will be 0 - 256 ms RTO is in ms.
2021 			 */
2022 			if ((stcb->asoc.hb_is_disabled) &&
2023 			    (cnt_of_unconf == 0)) {
2024 				return;
2025 			}
2026 			if (net) {
2027 				int delay;
2028 
2029 				delay = stcb->asoc.heart_beat_delay;
2030 				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
2031 					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
2032 					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
2033 					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
2034 						delay = 0;
2035 					}
2036 				}
2037 				if (net->RTO == 0) {
2038 					/* Never been checked */
2039 					to_ticks = this_random + stcb->asoc.initial_rto + delay;
2040 				} else {
2041 					/* set rto_val to the ms */
2042 					to_ticks = delay + net->RTO + this_random;
2043 				}
2044 			} else {
2045 				if (cnt_of_unconf) {
2046 					to_ticks = this_random + stcb->asoc.initial_rto;
2047 				} else {
2048 					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
2049 				}
2050 			}
2051 			/*
2052 			 * Now we must convert the to_ticks that are now in
2053 			 * ms to ticks.
2054 			 */
2055 			to_ticks = MSEC_TO_TICKS(to_ticks);
2056 			tmr = &stcb->asoc.hb_timer;
2057 		}
2058 		break;
2059 	case SCTP_TIMER_TYPE_COOKIE:
2060 		/*
2061 		 * Here we can use the RTO timer from the network since one
2062 		 * RTT was compelete. If a retran happened then we will be
2063 		 * using the RTO initial value.
2064 		 */
2065 		if ((stcb == NULL) || (net == NULL)) {
2066 			return;
2067 		}
2068 		if (net->RTO == 0) {
2069 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2070 		} else {
2071 			to_ticks = MSEC_TO_TICKS(net->RTO);
2072 		}
2073 		tmr = &net->rxt_timer;
2074 		break;
2075 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2076 		/*
2077 		 * nothing needed but the endpoint here ususually about 60
2078 		 * minutes.
2079 		 */
2080 		if (inp == NULL) {
2081 			return;
2082 		}
2083 		tmr = &inp->sctp_ep.signature_change;
2084 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
2085 		break;
2086 	case SCTP_TIMER_TYPE_ASOCKILL:
2087 		if (stcb == NULL) {
2088 			return;
2089 		}
2090 		tmr = &stcb->asoc.strreset_timer;
2091 		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
2092 		break;
2093 	case SCTP_TIMER_TYPE_INPKILL:
2094 		/*
2095 		 * The inp is setup to die. We re-use the signature_chage
2096 		 * timer since that has stopped and we are in the GONE
2097 		 * state.
2098 		 */
2099 		if (inp == NULL) {
2100 			return;
2101 		}
2102 		tmr = &inp->sctp_ep.signature_change;
2103 		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
2104 		break;
2105 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2106 		/*
2107 		 * Here we use the value found in the EP for PMTU ususually
2108 		 * about 10 minutes.
2109 		 */
2110 		if ((stcb == NULL) || (inp == NULL)) {
2111 			return;
2112 		}
2113 		if (net == NULL) {
2114 			return;
2115 		}
2116 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
2117 		tmr = &net->pmtu_timer;
2118 		break;
2119 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2120 		/* Here we use the RTO of the destination */
2121 		if ((stcb == NULL) || (net == NULL)) {
2122 			return;
2123 		}
2124 		if (net->RTO == 0) {
2125 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2126 		} else {
2127 			to_ticks = MSEC_TO_TICKS(net->RTO);
2128 		}
2129 		tmr = &net->rxt_timer;
2130 		break;
2131 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2132 		/*
2133 		 * Here we use the endpoints shutdown guard timer usually
2134 		 * about 3 minutes.
2135 		 */
2136 		if ((inp == NULL) || (stcb == NULL)) {
2137 			return;
2138 		}
2139 		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
2140 		tmr = &stcb->asoc.shut_guard_timer;
2141 		break;
2142 	case SCTP_TIMER_TYPE_STRRESET:
2143 		/*
2144 		 * Here the timer comes from the stcb but its value is from
2145 		 * the net's RTO.
2146 		 */
2147 		if ((stcb == NULL) || (net == NULL)) {
2148 			return;
2149 		}
2150 		if (net->RTO == 0) {
2151 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2152 		} else {
2153 			to_ticks = MSEC_TO_TICKS(net->RTO);
2154 		}
2155 		tmr = &stcb->asoc.strreset_timer;
2156 		break;
2157 
2158 	case SCTP_TIMER_TYPE_EARLYFR:
2159 		{
2160 			unsigned int msec;
2161 
2162 			if ((stcb == NULL) || (net == NULL)) {
2163 				return;
2164 			}
2165 			if (net->flight_size > net->cwnd) {
2166 				/* no need to start */
2167 				return;
2168 			}
2169 			SCTP_STAT_INCR(sctps_earlyfrstart);
2170 			if (net->lastsa == 0) {
2171 				/* Hmm no rtt estimate yet? */
2172 				msec = stcb->asoc.initial_rto >> 2;
2173 			} else {
2174 				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
2175 			}
2176 			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
2177 				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
2178 				if (msec < SCTP_MINFR_MSEC_FLOOR) {
2179 					msec = SCTP_MINFR_MSEC_FLOOR;
2180 				}
2181 			}
2182 			to_ticks = MSEC_TO_TICKS(msec);
2183 			tmr = &net->fr_timer;
2184 		}
2185 		break;
2186 	case SCTP_TIMER_TYPE_ASCONF:
2187 		/*
2188 		 * Here the timer comes from the stcb but its value is from
2189 		 * the net's RTO.
2190 		 */
2191 		if ((stcb == NULL) || (net == NULL)) {
2192 			return;
2193 		}
2194 		if (net->RTO == 0) {
2195 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2196 		} else {
2197 			to_ticks = MSEC_TO_TICKS(net->RTO);
2198 		}
2199 		tmr = &stcb->asoc.asconf_timer;
2200 		break;
2201 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2202 		if ((stcb == NULL) || (net != NULL)) {
2203 			return;
2204 		}
2205 		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
2206 		tmr = &stcb->asoc.delete_prim_timer;
2207 		break;
2208 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2209 		if (stcb == NULL) {
2210 			return;
2211 		}
2212 		if (stcb->asoc.sctp_autoclose_ticks == 0) {
2213 			/*
2214 			 * Really an error since stcb is NOT set to
2215 			 * autoclose
2216 			 */
2217 			return;
2218 		}
2219 		to_ticks = stcb->asoc.sctp_autoclose_ticks;
2220 		tmr = &stcb->asoc.autoclose_timer;
2221 		break;
2222 	default:
2223 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2224 		    __FUNCTION__, t_type);
2225 		return;
2226 		break;
2227 	};
2228 	if ((to_ticks <= 0) || (tmr == NULL)) {
2229 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
2230 		    __FUNCTION__, t_type, to_ticks, tmr);
2231 		return;
2232 	}
2233 	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
2234 		/*
2235 		 * we do NOT allow you to have it already running. if it is
2236 		 * we leave the current one up unchanged
2237 		 */
2238 		return;
2239 	}
2240 	/* At this point we can proceed */
2241 	if (t_type == SCTP_TIMER_TYPE_SEND) {
2242 		stcb->asoc.num_send_timers_up++;
2243 	}
2244 	tmr->stopped_from = 0;
2245 	tmr->type = t_type;
2246 	tmr->ep = (void *)inp;
2247 	tmr->tcb = (void *)stcb;
2248 	tmr->net = (void *)net;
2249 	tmr->self = (void *)tmr;
2250 	tmr->vnet = (void *)curvnet;
2251 	tmr->ticks = sctp_get_tick_count();
2252 	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
2253 	return;
2254 }
2255 
2256 void
2257 sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
2258     struct sctp_nets *net, uint32_t from)
2259 {
2260 	struct sctp_timer *tmr;
2261 
2262 	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
2263 	    (inp == NULL))
2264 		return;
2265 
2266 	tmr = NULL;
2267 	if (stcb) {
2268 		SCTP_TCB_LOCK_ASSERT(stcb);
2269 	}
2270 	switch (t_type) {
2271 	case SCTP_TIMER_TYPE_ZERO_COPY:
2272 		tmr = &inp->sctp_ep.zero_copy_timer;
2273 		break;
2274 	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
2275 		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
2276 		break;
2277 	case SCTP_TIMER_TYPE_ADDR_WQ:
2278 		tmr = &SCTP_BASE_INFO(addr_wq_timer);
2279 		break;
2280 	case SCTP_TIMER_TYPE_EARLYFR:
2281 		if ((stcb == NULL) || (net == NULL)) {
2282 			return;
2283 		}
2284 		tmr = &net->fr_timer;
2285 		SCTP_STAT_INCR(sctps_earlyfrstop);
2286 		break;
2287 	case SCTP_TIMER_TYPE_SEND:
2288 		if ((stcb == NULL) || (net == NULL)) {
2289 			return;
2290 		}
2291 		tmr = &net->rxt_timer;
2292 		break;
2293 	case SCTP_TIMER_TYPE_INIT:
2294 		if ((stcb == NULL) || (net == NULL)) {
2295 			return;
2296 		}
2297 		tmr = &net->rxt_timer;
2298 		break;
2299 	case SCTP_TIMER_TYPE_RECV:
2300 		if (stcb == NULL) {
2301 			return;
2302 		}
2303 		tmr = &stcb->asoc.dack_timer;
2304 		break;
2305 	case SCTP_TIMER_TYPE_SHUTDOWN:
2306 		if ((stcb == NULL) || (net == NULL)) {
2307 			return;
2308 		}
2309 		tmr = &net->rxt_timer;
2310 		break;
2311 	case SCTP_TIMER_TYPE_HEARTBEAT:
2312 		if (stcb == NULL) {
2313 			return;
2314 		}
2315 		tmr = &stcb->asoc.hb_timer;
2316 		break;
2317 	case SCTP_TIMER_TYPE_COOKIE:
2318 		if ((stcb == NULL) || (net == NULL)) {
2319 			return;
2320 		}
2321 		tmr = &net->rxt_timer;
2322 		break;
2323 	case SCTP_TIMER_TYPE_NEWCOOKIE:
2324 		/* nothing needed but the endpoint here */
2325 		tmr = &inp->sctp_ep.signature_change;
2326 		/*
2327 		 * We re-use the newcookie timer for the INP kill timer. We
2328 		 * must assure that we do not kill it by accident.
2329 		 */
2330 		break;
2331 	case SCTP_TIMER_TYPE_ASOCKILL:
2332 		/*
2333 		 * Stop the asoc kill timer.
2334 		 */
2335 		if (stcb == NULL) {
2336 			return;
2337 		}
2338 		tmr = &stcb->asoc.strreset_timer;
2339 		break;
2340 
2341 	case SCTP_TIMER_TYPE_INPKILL:
2342 		/*
2343 		 * The inp is setup to die. We re-use the signature_chage
2344 		 * timer since that has stopped and we are in the GONE
2345 		 * state.
2346 		 */
2347 		tmr = &inp->sctp_ep.signature_change;
2348 		break;
2349 	case SCTP_TIMER_TYPE_PATHMTURAISE:
2350 		if ((stcb == NULL) || (net == NULL)) {
2351 			return;
2352 		}
2353 		tmr = &net->pmtu_timer;
2354 		break;
2355 	case SCTP_TIMER_TYPE_SHUTDOWNACK:
2356 		if ((stcb == NULL) || (net == NULL)) {
2357 			return;
2358 		}
2359 		tmr = &net->rxt_timer;
2360 		break;
2361 	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
2362 		if (stcb == NULL) {
2363 			return;
2364 		}
2365 		tmr = &stcb->asoc.shut_guard_timer;
2366 		break;
2367 	case SCTP_TIMER_TYPE_STRRESET:
2368 		if (stcb == NULL) {
2369 			return;
2370 		}
2371 		tmr = &stcb->asoc.strreset_timer;
2372 		break;
2373 	case SCTP_TIMER_TYPE_ASCONF:
2374 		if (stcb == NULL) {
2375 			return;
2376 		}
2377 		tmr = &stcb->asoc.asconf_timer;
2378 		break;
2379 	case SCTP_TIMER_TYPE_PRIM_DELETED:
2380 		if (stcb == NULL) {
2381 			return;
2382 		}
2383 		tmr = &stcb->asoc.delete_prim_timer;
2384 		break;
2385 	case SCTP_TIMER_TYPE_AUTOCLOSE:
2386 		if (stcb == NULL) {
2387 			return;
2388 		}
2389 		tmr = &stcb->asoc.autoclose_timer;
2390 		break;
2391 	default:
2392 		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
2393 		    __FUNCTION__, t_type);
2394 		break;
2395 	};
2396 	if (tmr == NULL) {
2397 		return;
2398 	}
2399 	if ((tmr->type != t_type) && tmr->type) {
2400 		/*
2401 		 * Ok we have a timer that is under joint use. Cookie timer
2402 		 * per chance with the SEND timer. We therefore are NOT
2403 		 * running the timer that the caller wants stopped.  So just
2404 		 * return.
2405 		 */
2406 		return;
2407 	}
2408 	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
2409 		stcb->asoc.num_send_timers_up--;
2410 		if (stcb->asoc.num_send_timers_up < 0) {
2411 			stcb->asoc.num_send_timers_up = 0;
2412 		}
2413 	}
2414 	tmr->self = NULL;
2415 	tmr->stopped_from = from;
2416 	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
2417 	return;
2418 }
2419 
2420 uint32_t
2421 sctp_calculate_len(struct mbuf *m)
2422 {
2423 	uint32_t tlen = 0;
2424 	struct mbuf *at;
2425 
2426 	at = m;
2427 	while (at) {
2428 		tlen += SCTP_BUF_LEN(at);
2429 		at = SCTP_BUF_NEXT(at);
2430 	}
2431 	return (tlen);
2432 }
2433 
2434 void
2435 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2436     struct sctp_association *asoc, uint32_t mtu)
2437 {
2438 	/*
2439 	 * Reset the P-MTU size on this association, this involves changing
2440 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2441 	 * allow the DF flag to be cleared.
2442 	 */
2443 	struct sctp_tmit_chunk *chk;
2444 	unsigned int eff_mtu, ovh;
2445 
2446 	asoc->smallest_mtu = mtu;
2447 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2448 		ovh = SCTP_MIN_OVERHEAD;
2449 	} else {
2450 		ovh = SCTP_MIN_V4_OVERHEAD;
2451 	}
2452 	eff_mtu = mtu - ovh;
2453 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2454 		if (chk->send_size > eff_mtu) {
2455 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2456 		}
2457 	}
2458 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2459 		if (chk->send_size > eff_mtu) {
2460 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2461 		}
2462 	}
2463 }
2464 
2465 
2466 /*
2467  * given an association and starting time of the current RTT period return
2468  * RTO in number of msecs net should point to the current network
2469  */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 *
	 * 'safe' selects whether 'told' must be copied to an aligned local
	 * before use (sctp_align_unsafe_makecopy) or can be used in place
	 * (sctp_align_safe_nocopy); any other value is rejected.  The SRTT
	 * and RTTVAR state is updated on 'net' (assumed non-NULL here --
	 * callers must guarantee that; no check is made).
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/*
	 * compute the RTT value: the elapsed time now - *old, converted to
	 * milliseconds, computed branch-by-branch to avoid negative
	 * intermediate values in the usec field.
	 */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/*
			 * impossible .. garbage in nothing out
			 * (NOTE: unreachable -- the three comparisons
			 * above already cover >, < and ==.)
			 */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	net->rtt = o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		if (calc_time < 0) {
			/* |error| feeds the variance estimate */
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			/* keep the variance strictly positive */
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + RTTVAR (integer-scaled form) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	/* Latch satellite-network mode once, then lock out flapping. */
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2595 
2596 /*
2597  * return a pointer to a contiguous piece of data from the given mbuf chain
2598  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2599  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2600  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2601  */
2602 caddr_t
2603 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2604 {
2605 	uint32_t count;
2606 	uint8_t *ptr;
2607 
2608 	ptr = in_ptr;
2609 	if ((off < 0) || (len <= 0))
2610 		return (NULL);
2611 
2612 	/* find the desired start location */
2613 	while ((m != NULL) && (off > 0)) {
2614 		if (off < SCTP_BUF_LEN(m))
2615 			break;
2616 		off -= SCTP_BUF_LEN(m);
2617 		m = SCTP_BUF_NEXT(m);
2618 	}
2619 	if (m == NULL)
2620 		return (NULL);
2621 
2622 	/* is the current mbuf large enough (eg. contiguous)? */
2623 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2624 		return (mtod(m, caddr_t)+off);
2625 	} else {
2626 		/* else, it spans more than one mbuf, so save a temp copy... */
2627 		while ((m != NULL) && (len > 0)) {
2628 			count = min(SCTP_BUF_LEN(m) - off, len);
2629 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2630 			len -= count;
2631 			ptr += count;
2632 			off = 0;
2633 			m = SCTP_BUF_NEXT(m);
2634 		}
2635 		if ((m == NULL) && (len > 0))
2636 			return (NULL);
2637 		else
2638 			return ((caddr_t)in_ptr);
2639 	}
2640 }
2641 
2642 
2643 
2644 struct sctp_paramhdr *
2645 sctp_get_next_param(struct mbuf *m,
2646     int offset,
2647     struct sctp_paramhdr *pull,
2648     int pull_limit)
2649 {
2650 	/* This just provides a typed signature to Peter's Pull routine */
2651 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2652 	    (uint8_t *) pull));
2653 }
2654 
2655 
2656 int
2657 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2658 {
2659 	/*
2660 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2661 	 * padlen is > 3 this routine will fail.
2662 	 */
2663 	uint8_t *dp;
2664 	int i;
2665 
2666 	if (padlen > 3) {
2667 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2668 		return (ENOBUFS);
2669 	}
2670 	if (padlen <= M_TRAILINGSPACE(m)) {
2671 		/*
2672 		 * The easy way. We hope the majority of the time we hit
2673 		 * here :)
2674 		 */
2675 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2676 		SCTP_BUF_LEN(m) += padlen;
2677 	} else {
2678 		/* Hard way we must grow the mbuf */
2679 		struct mbuf *tmp;
2680 
2681 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2682 		if (tmp == NULL) {
2683 			/* Out of space GAK! we are in big trouble. */
2684 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2685 			return (ENOSPC);
2686 		}
2687 		/* setup and insert in middle */
2688 		SCTP_BUF_LEN(tmp) = padlen;
2689 		SCTP_BUF_NEXT(tmp) = NULL;
2690 		SCTP_BUF_NEXT(m) = tmp;
2691 		dp = mtod(tmp, uint8_t *);
2692 	}
2693 	/* zero out the pad */
2694 	for (i = 0; i < padlen; i++) {
2695 		*dp = 0;
2696 		dp++;
2697 	}
2698 	return (0);
2699 }
2700 
2701 int
2702 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2703 {
2704 	/* find the last mbuf in chain and pad it */
2705 	struct mbuf *m_at;
2706 
2707 	m_at = m;
2708 	if (last_mbuf) {
2709 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2710 	} else {
2711 		while (m_at) {
2712 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2713 				return (sctp_add_pad_tombuf(m_at, padval));
2714 			}
2715 			m_at = SCTP_BUF_NEXT(m_at);
2716 		}
2717 	}
2718 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2719 	return (EFAULT);
2720 }
2721 
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Build and queue an SCTP_ASSOC_CHANGE notification on the
	 * association's socket.  For one-to-one (TCP model) or connected
	 * one-to-many sockets, a COMM_LOST/CANT_STR_ASSOC event also sets
	 * so_error and wakes any sleepers so blocked callers observe the
	 * failure.  'so_locked' says whether the caller already holds the
	 * socket lock (only consulted on platforms with the lock juggling
	 * below).
	 */
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			/* Association never completed: connection refused. */
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock-order dance: drop the TCB lock before taking
			 * the socket lock, holding a refcount so the stcb
			 * cannot be freed in between; re-check for a closed
			 * socket afterwards.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		socantrcvmore(stcb->sctp_socket);
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Fill in the sctp_assoc_change notification body. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Same lock-order dance as above. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2839 
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	/*
	 * Build and queue an SCTP_PEER_ADDR_CHANGE notification carrying
	 * the affected peer address 'sa', the new address 'state' and an
	 * 'error' cause, if the application has enabled address events.
	 */
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no mbuf available; silently drop the notification */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	/* Copy the address into the notification, family-dependent. */
	switch (sa->sa_family) {
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			/*
			 * Normalize link-local scope representation for
			 * userland: either recover the scope_id or strip
			 * the kernel's embedded form.
			 */
			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
2914 
2915 
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Build and queue an SCTP_SEND_FAILED notification for a chunk
	 * that could not be delivered.  The chunk's user data mbufs are
	 * stolen (chk->data set to NULL) and chained behind the
	 * notification header so the application gets the payload back.
	 */
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/*
	 * Notification length = header + user data (DATA chunk header
	 * excluded).  Computed here, before the trim below shrinks
	 * chk->send_size by the same amount.
	 */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
2997 
2998 
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Build and queue an SCTP_SEND_FAILED notification for a message
	 * still sitting on a stream queue (never made it into a chunk).
	 * The pending data mbufs are stolen (sp->data set to NULL) and
	 * chained behind the notification header.
	 */
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* header + queued user data; no chunk header to subtract here */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* part of the message was already moved/sent */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3071 
3072 
3073 
3074 static void
3075 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3076     uint32_t error)
3077 {
3078 	struct mbuf *m_notify;
3079 	struct sctp_adaptation_event *sai;
3080 	struct sctp_queued_to_read *control;
3081 
3082 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3083 		/* event not enabled */
3084 		return;
3085 	}
3086 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3087 	if (m_notify == NULL)
3088 		/* no space left */
3089 		return;
3090 	SCTP_BUF_LEN(m_notify) = 0;
3091 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3092 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3093 	sai->sai_flags = 0;
3094 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3095 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3096 	sai->sai_assoc_id = sctp_get_associd(stcb);
3097 
3098 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3099 	SCTP_BUF_NEXT(m_notify) = NULL;
3100 
3101 	/* append to socket */
3102 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3103 	    0, 0, 0, 0, 0, 0,
3104 	    m_notify);
3105 	if (control == NULL) {
3106 		/* no memory */
3107 		sctp_m_freem(m_notify);
3108 		return;
3109 	}
3110 	control->length = SCTP_BUF_LEN(m_notify);
3111 	control->spec_flags = M_NOTIFICATION;
3112 	/* not that we need this */
3113 	control->tail_mbuf = m_notify;
3114 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3115 	    control,
3116 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3117 }
3118 
3119 /* This always must be called with the read-queue LOCKED in the INP */
3120 static void
3121 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3122     uint32_t val, int so_locked
3123 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3124     SCTP_UNUSED
3125 #endif
3126 )
3127 {
3128 	struct mbuf *m_notify;
3129 	struct sctp_pdapi_event *pdapi;
3130 	struct sctp_queued_to_read *control;
3131 	struct sockbuf *sb;
3132 
3133 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3134 		/* event not enabled */
3135 		return;
3136 	}
3137 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
3138 		return;
3139 	}
3140 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3141 	if (m_notify == NULL)
3142 		/* no space left */
3143 		return;
3144 	SCTP_BUF_LEN(m_notify) = 0;
3145 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3146 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3147 	pdapi->pdapi_flags = 0;
3148 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3149 	pdapi->pdapi_indication = error;
3150 	pdapi->pdapi_stream = (val >> 16);
3151 	pdapi->pdapi_seq = (val & 0x0000ffff);
3152 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3153 
3154 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3155 	SCTP_BUF_NEXT(m_notify) = NULL;
3156 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3157 	    0, 0, 0, 0, 0, 0,
3158 	    m_notify);
3159 	if (control == NULL) {
3160 		/* no memory */
3161 		sctp_m_freem(m_notify);
3162 		return;
3163 	}
3164 	control->spec_flags = M_NOTIFICATION;
3165 	control->length = SCTP_BUF_LEN(m_notify);
3166 	/* not that we need this */
3167 	control->tail_mbuf = m_notify;
3168 	control->held_length = 0;
3169 	control->length = 0;
3170 	sb = &stcb->sctp_socket->so_rcv;
3171 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3172 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3173 	}
3174 	sctp_sballoc(stcb, sb, m_notify);
3175 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3176 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3177 	}
3178 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3179 	control->end_added = 1;
3180 	if (stcb->asoc.control_pdapi)
3181 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3182 	else {
3183 		/* we really should not see this case */
3184 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3185 	}
3186 	if (stcb->sctp_ep && stcb->sctp_socket) {
3187 		/* This should always be the case */
3188 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3189 		struct socket *so;
3190 
3191 		so = SCTP_INP_SO(stcb->sctp_ep);
3192 		if (!so_locked) {
3193 			atomic_add_int(&stcb->asoc.refcnt, 1);
3194 			SCTP_TCB_UNLOCK(stcb);
3195 			SCTP_SOCKET_LOCK(so, 1);
3196 			SCTP_TCB_LOCK(stcb);
3197 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3198 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3199 				SCTP_SOCKET_UNLOCK(so, 1);
3200 				return;
3201 			}
3202 		}
3203 #endif
3204 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3205 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3206 		if (!so_locked) {
3207 			SCTP_SOCKET_UNLOCK(so, 1);
3208 		}
3209 #endif
3210 	}
3211 }
3212 
/*
 * Notify the ULP of a peer SHUTDOWN.  For TCP-model (and connected
 * one-to-many pool) sockets the socket is first marked unable to send;
 * then, if the application subscribed to it, an SCTP_SHUTDOWN_EVENT
 * notification is queued on the receive buffer.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/*
		 * Lock order: socket lock before TCB lock.  Hold a ref
		 * across the unlock window so the assoc cannot go away.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* the socket was closed while we slept */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3280 
3281 static void
3282 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3283     int so_locked
3284 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3285     SCTP_UNUSED
3286 #endif
3287 )
3288 {
3289 	struct mbuf *m_notify;
3290 	struct sctp_sender_dry_event *event;
3291 	struct sctp_queued_to_read *control;
3292 
3293 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3294 		/* event not enabled */
3295 		return;
3296 	}
3297 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3298 	if (m_notify == NULL) {
3299 		/* no space left */
3300 		return;
3301 	}
3302 	SCTP_BUF_LEN(m_notify) = 0;
3303 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3304 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3305 	event->sender_dry_flags = 0;
3306 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3307 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3308 
3309 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3310 	SCTP_BUF_NEXT(m_notify) = NULL;
3311 
3312 	/* append to socket */
3313 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3314 	    0, 0, 0, 0, 0, 0, m_notify);
3315 	if (control == NULL) {
3316 		/* no memory */
3317 		sctp_m_freem(m_notify);
3318 		return;
3319 	}
3320 	control->length = SCTP_BUF_LEN(m_notify);
3321 	control->spec_flags = M_NOTIFICATION;
3322 	/* not that we need this */
3323 	control->tail_mbuf = m_notify;
3324 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3325 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3326 }
3327 
3328 
3329 static void
3330 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3331 {
3332 	struct mbuf *m_notify;
3333 	struct sctp_queued_to_read *control;
3334 	struct sctp_stream_reset_event *strreset;
3335 	int len;
3336 
3337 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3338 		/* event not enabled */
3339 		return;
3340 	}
3341 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3342 	if (m_notify == NULL)
3343 		/* no space left */
3344 		return;
3345 	SCTP_BUF_LEN(m_notify) = 0;
3346 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3347 	if (len > M_TRAILINGSPACE(m_notify)) {
3348 		/* never enough room */
3349 		sctp_m_freem(m_notify);
3350 		return;
3351 	}
3352 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3353 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3354 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3355 	strreset->strreset_length = len;
3356 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3357 	strreset->strreset_list[0] = number_entries;
3358 
3359 	SCTP_BUF_LEN(m_notify) = len;
3360 	SCTP_BUF_NEXT(m_notify) = NULL;
3361 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3362 		/* no space */
3363 		sctp_m_freem(m_notify);
3364 		return;
3365 	}
3366 	/* append to socket */
3367 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3368 	    0, 0, 0, 0, 0, 0,
3369 	    m_notify);
3370 	if (control == NULL) {
3371 		/* no memory */
3372 		sctp_m_freem(m_notify);
3373 		return;
3374 	}
3375 	control->spec_flags = M_NOTIFICATION;
3376 	control->length = SCTP_BUF_LEN(m_notify);
3377 	/* not that we need this */
3378 	control->tail_mbuf = m_notify;
3379 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3380 	    control,
3381 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3382 }
3383 
3384 
/*
 * Queue an SCTP_STREAM_RESET_EVENT notification for a stream reset.
 * 'list' holds 'number_entries' stream numbers in network byte order
 * (an empty list means the reset applies to all streams); 'flag'
 * supplies the direction/result bits for strreset_flags.
 */
static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_reset_event *strreset;
	int len;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	/* header plus one 16-bit slot per listed stream */
	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
	if (len > M_TRAILINGSPACE(m_notify)) {
		/* never enough room */
		sctp_m_freem(m_notify);
		return;
	}
	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
	if (number_entries == 0) {
		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
	} else {
		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
	}
	strreset->strreset_length = len;
	strreset->strreset_assoc_id = sctp_get_associd(stcb);
	if (number_entries) {
		int i;

		/* convert each stream number to host byte order */
		for (i = 0; i < number_entries; i++) {
			strreset->strreset_list[i] = ntohs(list[i]);
		}
	}
	SCTP_BUF_LEN(m_notify) = len;
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3449 
3450 
3451 void
3452 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3453     uint32_t error, void *data, int so_locked
3454 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3455     SCTP_UNUSED
3456 #endif
3457 )
3458 {
3459 	if ((stcb == NULL) ||
3460 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3461 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3462 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3463 		/* If the socket is gone we are out of here */
3464 		return;
3465 	}
3466 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3467 		return;
3468 	}
3469 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3470 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3471 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3472 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3473 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3474 			/* Don't report these in front states */
3475 			return;
3476 		}
3477 	}
3478 	switch (notification) {
3479 	case SCTP_NOTIFY_ASSOC_UP:
3480 		if (stcb->asoc.assoc_up_sent == 0) {
3481 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3482 			stcb->asoc.assoc_up_sent = 1;
3483 		}
3484 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3485 			sctp_notify_adaptation_layer(stcb, error);
3486 		}
3487 		if (stcb->asoc.peer_supports_auth == 0) {
3488 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3489 			    NULL, so_locked);
3490 		}
3491 		break;
3492 	case SCTP_NOTIFY_ASSOC_DOWN:
3493 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3494 		break;
3495 	case SCTP_NOTIFY_INTERFACE_DOWN:
3496 		{
3497 			struct sctp_nets *net;
3498 
3499 			net = (struct sctp_nets *)data;
3500 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3501 			    (struct sockaddr *)&net->ro._l_addr, error);
3502 			break;
3503 		}
3504 	case SCTP_NOTIFY_INTERFACE_UP:
3505 		{
3506 			struct sctp_nets *net;
3507 
3508 			net = (struct sctp_nets *)data;
3509 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3510 			    (struct sockaddr *)&net->ro._l_addr, error);
3511 			break;
3512 		}
3513 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3514 		{
3515 			struct sctp_nets *net;
3516 
3517 			net = (struct sctp_nets *)data;
3518 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3519 			    (struct sockaddr *)&net->ro._l_addr, error);
3520 			break;
3521 		}
3522 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3523 		sctp_notify_send_failed2(stcb, error,
3524 		    (struct sctp_stream_queue_pending *)data, so_locked);
3525 		break;
3526 	case SCTP_NOTIFY_DG_FAIL:
3527 		sctp_notify_send_failed(stcb, error,
3528 		    (struct sctp_tmit_chunk *)data, so_locked);
3529 		break;
3530 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3531 		{
3532 			uint32_t val;
3533 
3534 			val = *((uint32_t *) data);
3535 
3536 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3537 			break;
3538 		}
3539 	case SCTP_NOTIFY_STRDATA_ERR:
3540 		break;
3541 	case SCTP_NOTIFY_ASSOC_ABORTED:
3542 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3543 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3544 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3545 		} else {
3546 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3547 		}
3548 		break;
3549 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3550 		break;
3551 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3552 		break;
3553 	case SCTP_NOTIFY_ASSOC_RESTART:
3554 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3555 		if (stcb->asoc.peer_supports_auth == 0) {
3556 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3557 			    NULL, so_locked);
3558 		}
3559 		break;
3560 	case SCTP_NOTIFY_HB_RESP:
3561 		break;
3562 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3563 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3564 		break;
3565 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3566 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3567 		break;
3568 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3569 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3570 		break;
3571 
3572 	case SCTP_NOTIFY_STR_RESET_SEND:
3573 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3574 		break;
3575 	case SCTP_NOTIFY_STR_RESET_RECV:
3576 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3577 		break;
3578 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3579 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3580 		break;
3581 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3582 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3583 		break;
3584 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3585 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3586 		    error);
3587 		break;
3588 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3589 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3590 		    error);
3591 		break;
3592 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3593 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3594 		    error);
3595 		break;
3596 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3597 		break;
3598 	case SCTP_NOTIFY_ASCONF_FAILED:
3599 		break;
3600 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3601 		sctp_notify_shutdown_event(stcb);
3602 		break;
3603 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3604 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3605 		    (uint16_t) (uintptr_t) data,
3606 		    so_locked);
3607 		break;
3608 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3609 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3610 		    (uint16_t) (uintptr_t) data,
3611 		    so_locked);
3612 		break;
3613 	case SCTP_NOTIFY_NO_PEER_AUTH:
3614 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3615 		    (uint16_t) (uintptr_t) data,
3616 		    so_locked);
3617 		break;
3618 	case SCTP_NOTIFY_SENDER_DRY:
3619 		sctp_notify_sender_dry_event(stcb, so_locked);
3620 		break;
3621 	default:
3622 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3623 		    __FUNCTION__, notification, notification);
3624 		break;
3625 	}			/* end switch */
3626 }
3627 
3628 void
3629 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3630 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3631     SCTP_UNUSED
3632 #endif
3633 )
3634 {
3635 	struct sctp_association *asoc;
3636 	struct sctp_stream_out *outs;
3637 	struct sctp_tmit_chunk *chk, *nchk;
3638 	struct sctp_stream_queue_pending *sp, *nsp;
3639 	int i;
3640 
3641 	if (stcb == NULL) {
3642 		return;
3643 	}
3644 	asoc = &stcb->asoc;
3645 	if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) {
3646 		/* already being freed */
3647 		return;
3648 	}
3649 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3650 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3651 	    (asoc->state & SCTP_STATE_CLOSED_SOCKET)) {
3652 		return;
3653 	}
3654 	/* now through all the gunk freeing chunks */
3655 	if (holds_lock == 0) {
3656 		SCTP_TCB_SEND_LOCK(stcb);
3657 	}
3658 	/* sent queue SHOULD be empty */
3659 	TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
3660 		TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3661 		asoc->sent_queue_cnt--;
3662 		if (chk->data != NULL) {
3663 			sctp_free_bufspace(stcb, asoc, chk, 1);
3664 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3665 			    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3666 			if (chk->data) {
3667 				sctp_m_freem(chk->data);
3668 				chk->data = NULL;
3669 			}
3670 		}
3671 		sctp_free_a_chunk(stcb, chk);
3672 		/* sa_ignore FREED_MEMORY */
3673 	}
3674 	/* pending send queue SHOULD be empty */
3675 	TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
3676 		TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3677 		asoc->send_queue_cnt--;
3678 		if (chk->data != NULL) {
3679 			sctp_free_bufspace(stcb, asoc, chk, 1);
3680 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3681 			    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3682 			if (chk->data) {
3683 				sctp_m_freem(chk->data);
3684 				chk->data = NULL;
3685 			}
3686 		}
3687 		sctp_free_a_chunk(stcb, chk);
3688 		/* sa_ignore FREED_MEMORY */
3689 	}
3690 	for (i = 0; i < asoc->streamoutcnt; i++) {
3691 		/* For each stream */
3692 		outs = &asoc->strmout[i];
3693 		/* clean up any sends there */
3694 		asoc->locked_on_sending = NULL;
3695 		TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
3696 			asoc->stream_queue_cnt--;
3697 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3698 			sctp_free_spbufspace(stcb, asoc, sp);
3699 			if (sp->data) {
3700 				sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3701 				    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3702 				if (sp->data) {
3703 					sctp_m_freem(sp->data);
3704 					sp->data = NULL;
3705 				}
3706 			}
3707 			if (sp->net) {
3708 				sctp_free_remote_addr(sp->net);
3709 				sp->net = NULL;
3710 			}
3711 			/* Free the chunk */
3712 			sctp_free_a_strmoq(stcb, sp);
3713 			/* sa_ignore FREED_MEMORY */
3714 		}
3715 	}
3716 
3717 	if (holds_lock == 0) {
3718 		SCTP_TCB_SEND_UNLOCK(stcb);
3719 	}
3720 }
3721 
3722 void
3723 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3724 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3725     SCTP_UNUSED
3726 #endif
3727 )
3728 {
3729 
3730 	if (stcb == NULL) {
3731 		return;
3732 	}
3733 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3734 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3735 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3736 		return;
3737 	}
3738 	/* Tell them we lost the asoc */
3739 	sctp_report_all_outbound(stcb, 1, so_locked);
3740 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3741 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3742 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3743 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3744 	}
3745 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3746 }
3747 
/*
 * Abort an association in response to an inbound packet: notify the
 * ULP (when a TCB exists), send an ABORT back to the peer built from
 * the offending packet 'm', then free the association.  When stcb is
 * non-NULL it is consumed — the TCB is gone on return.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	/* vtag stays 0 for an out-of-the-blue abort (no TCB) */
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Lock order: socket lock before TCB lock.  Hold a ref
		 * across the unlock window so the assoc cannot vanish.
		 */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		SCTP_STAT_INCR_COUNTER32(sctps_aborted);
		/* established associations also decrement the gauge */
		if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
}
3791 
3792 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Dump the per-association inbound and outbound TSN history rings to
 * the console for debugging.  Only compiled in under
 * SCTP_ASOCLOG_OF_TSNS, and the body additionally requires
 * NOSIY_PRINTS.  NOTE(review): "NOSIY_PRINTS" looks like a typo of
 * "NOISY_PRINTS" — as written, defining NOISY_PRINTS has no effect;
 * confirm before renaming, since build glue may use the current name.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	/* ring has wrapped: print the older half (from the cursor up) first */
	if (stcb->asoc.tsn_in_wrapped) {
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	/* then the newer half (start of the ring up to the cursor) */
	if (stcb->asoc.tsn_in_at) {
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	/* same two-pass scheme for the outbound ring */
	if (stcb->asoc.tsn_out_wrapped) {
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3853 
3854 #endif
3855 
/*
 * Abort an existing association from our side: notify the ULP, send
 * an ABORT chunk (with optional cause 'op_err') to the peer, and free
 * the TCB.  With a NULL stcb it only garbage-collects an endpoint
 * already marked SOCKET_GONE.  The TCB is gone on return.
 */
void
sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    int error, struct mbuf *op_err,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(inp);
#endif
	if (stcb == NULL) {
		/* Got to have a TCB */
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last assoc is gone; release the endpoint */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
		return;
	} else {
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	vtag = stcb->asoc.peer_vtag;
	/* notify the ulp */
	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
		sctp_abort_notification(stcb, error, so_locked);
	/* notify the peer */
#if defined(SCTP_PANIC_ON_ABORT)
	panic("aborting an association");
#endif
	sctp_send_abort_tcb(stcb, op_err, so_locked);
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	/* established associations also decrement the gauge */
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
	/* now free the asoc */
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Lock order: socket lock before TCB lock.  Hold a ref across
	 * the unlock window so the assoc cannot vanish.
	 */
	if (!so_locked) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
	}
#endif
	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}
3921 
3922 void
3923 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3924     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3925 {
3926 	struct sctp_chunkhdr *ch, chunk_buf;
3927 	unsigned int chk_length;
3928 
3929 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3930 	/* Generate a TO address for future reference */
3931 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3932 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3933 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3934 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3935 		}
3936 	}
3937 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3938 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3939 	while (ch != NULL) {
3940 		chk_length = ntohs(ch->chunk_length);
3941 		if (chk_length < sizeof(*ch)) {
3942 			/* break to abort land */
3943 			break;
3944 		}
3945 		switch (ch->chunk_type) {
3946 		case SCTP_COOKIE_ECHO:
3947 			/* We hit here only if the assoc is being freed */
3948 			return;
3949 		case SCTP_PACKET_DROPPED:
3950 			/* we don't respond to pkt-dropped */
3951 			return;
3952 		case SCTP_ABORT_ASSOCIATION:
3953 			/* we don't respond with an ABORT to an ABORT */
3954 			return;
3955 		case SCTP_SHUTDOWN_COMPLETE:
3956 			/*
3957 			 * we ignore it since we are not waiting for it and
3958 			 * peer is gone
3959 			 */
3960 			return;
3961 		case SCTP_SHUTDOWN_ACK:
3962 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
3963 			return;
3964 		default:
3965 			break;
3966 		}
3967 		offset += SCTP_SIZE32(chk_length);
3968 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3969 		    sizeof(*ch), (uint8_t *) & chunk_buf);
3970 	}
3971 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
3972 }
3973 
3974 /*
3975  * check the inbound datagram to make sure there is not an abort inside it,
3976  * if there is return 1, else return 0.
3977  */
3978 int
3979 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
3980 {
3981 	struct sctp_chunkhdr *ch;
3982 	struct sctp_init_chunk *init_chk, chunk_buf;
3983 	int offset;
3984 	unsigned int chk_length;
3985 
3986 	offset = iphlen + sizeof(struct sctphdr);
3987 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
3988 	    (uint8_t *) & chunk_buf);
3989 	while (ch != NULL) {
3990 		chk_length = ntohs(ch->chunk_length);
3991 		if (chk_length < sizeof(*ch)) {
3992 			/* packet is probably corrupt */
3993 			break;
3994 		}
3995 		/* we seem to be ok, is it an abort? */
3996 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
3997 			/* yep, tell them */
3998 			return (1);
3999 		}
4000 		if (ch->chunk_type == SCTP_INITIATION) {
4001 			/* need to update the Vtag */
4002 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4003 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4004 			if (init_chk != NULL) {
4005 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4006 			}
4007 		}
4008 		/* Nope, move to the next chunk */
4009 		offset += SCTP_SIZE32(chk_length);
4010 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4011 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4012 	}
4013 	return (0);
4014 }
4015 
4016 /*
4017  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4018  * set (i.e. it's 0) so, create this function to compare link local scopes
4019  */
4020 #ifdef INET6
4021 uint32_t
4022 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4023 {
4024 	struct sockaddr_in6 a, b;
4025 
4026 	/* save copies */
4027 	a = *addr1;
4028 	b = *addr2;
4029 
4030 	if (a.sin6_scope_id == 0)
4031 		if (sa6_recoverscope(&a)) {
4032 			/* can't get scope, so can't match */
4033 			return (0);
4034 		}
4035 	if (b.sin6_scope_id == 0)
4036 		if (sa6_recoverscope(&b)) {
4037 			/* can't get scope, so can't match */
4038 			return (0);
4039 		}
4040 	if (a.sin6_scope_id != b.sin6_scope_id)
4041 		return (0);
4042 
4043 	return (1);
4044 }
4045 
4046 /*
4047  * returns a sockaddr_in6 with embedded scope recovered and removed
4048  */
4049 struct sockaddr_in6 *
4050 sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
4051 {
4052 	/* check and strip embedded scope junk */
4053 	if (addr->sin6_family == AF_INET6) {
4054 		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
4055 			if (addr->sin6_scope_id == 0) {
4056 				*store = *addr;
4057 				if (!sa6_recoverscope(store)) {
4058 					/* use the recovered scope */
4059 					addr = store;
4060 				}
4061 			} else {
4062 				/* else, return the original "to" addr */
4063 				in6_clearscope(&addr->sin6_addr);
4064 			}
4065 		}
4066 	}
4067 	return (addr);
4068 }
4069 
4070 #endif
4071 
4072 /*
4073  * are the two addresses the same?  currently a "scopeless" check returns: 1
4074  * if same, 0 if not
4075  */
int
sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
{
	/*
	 * Scopeless address equality: returns 1 when the two sockaddrs
	 * carry the same address (ports ignored), 0 otherwise.
	 */

	/* both pointers must be valid */
	if ((sa1 == NULL) || (sa2 == NULL)) {
		return (0);
	}
	/* different families never match */
	if (sa1->sa_family != sa2->sa_family) {
		return (0);
	}
#ifdef INET6
	if (sa1->sa_family == AF_INET6) {
		/* IPv6 addresses */
		return (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)sa1,
		    (struct sockaddr_in6 *)sa2));
	}
#endif
	if (sa1->sa_family == AF_INET) {
		/* IPv4 addresses */
		struct sockaddr_in *a = (struct sockaddr_in *)sa1;
		struct sockaddr_in *b = (struct sockaddr_in *)sa2;

		return (a->sin_addr.s_addr == b->sin_addr.s_addr);
	}
	/* we don't do other families... */
	return (0);
}
4115 
4116 void
4117 sctp_print_address(struct sockaddr *sa)
4118 {
4119 #ifdef INET6
4120 	char ip6buf[INET6_ADDRSTRLEN];
4121 
4122 	ip6buf[0] = 0;
4123 #endif
4124 
4125 	switch (sa->sa_family) {
4126 #ifdef INET6
4127 	case AF_INET6:
4128 		{
4129 			struct sockaddr_in6 *sin6;
4130 
4131 			sin6 = (struct sockaddr_in6 *)sa;
4132 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4133 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4134 			    ntohs(sin6->sin6_port),
4135 			    sin6->sin6_scope_id);
4136 			break;
4137 		}
4138 #endif
4139 	case AF_INET:
4140 		{
4141 			struct sockaddr_in *sin;
4142 			unsigned char *p;
4143 
4144 			sin = (struct sockaddr_in *)sa;
4145 			p = (unsigned char *)&sin->sin_addr;
4146 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4147 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4148 			break;
4149 		}
4150 	default:
4151 		SCTP_PRINTF("?\n");
4152 		break;
4153 	}
4154 }
4155 
4156 void
4157 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4158 {
4159 	switch (iph->ip_v) {
4160 	case IPVERSION:
4161 		{
4162 			struct sockaddr_in lsa, fsa;
4163 
4164 			bzero(&lsa, sizeof(lsa));
4165 			lsa.sin_len = sizeof(lsa);
4166 			lsa.sin_family = AF_INET;
4167 			lsa.sin_addr = iph->ip_src;
4168 			lsa.sin_port = sh->src_port;
4169 			bzero(&fsa, sizeof(fsa));
4170 			fsa.sin_len = sizeof(fsa);
4171 			fsa.sin_family = AF_INET;
4172 			fsa.sin_addr = iph->ip_dst;
4173 			fsa.sin_port = sh->dest_port;
4174 			SCTP_PRINTF("src: ");
4175 			sctp_print_address((struct sockaddr *)&lsa);
4176 			SCTP_PRINTF("dest: ");
4177 			sctp_print_address((struct sockaddr *)&fsa);
4178 			break;
4179 		}
4180 #ifdef INET6
4181 	case IPV6_VERSION >> 4:
4182 		{
4183 			struct ip6_hdr *ip6;
4184 			struct sockaddr_in6 lsa6, fsa6;
4185 
4186 			ip6 = (struct ip6_hdr *)iph;
4187 			bzero(&lsa6, sizeof(lsa6));
4188 			lsa6.sin6_len = sizeof(lsa6);
4189 			lsa6.sin6_family = AF_INET6;
4190 			lsa6.sin6_addr = ip6->ip6_src;
4191 			lsa6.sin6_port = sh->src_port;
4192 			bzero(&fsa6, sizeof(fsa6));
4193 			fsa6.sin6_len = sizeof(fsa6);
4194 			fsa6.sin6_family = AF_INET6;
4195 			fsa6.sin6_addr = ip6->ip6_dst;
4196 			fsa6.sin6_port = sh->dest_port;
4197 			SCTP_PRINTF("src: ");
4198 			sctp_print_address((struct sockaddr *)&lsa6);
4199 			SCTP_PRINTF("dest: ");
4200 			sctp_print_address((struct sockaddr *)&fsa6);
4201 			break;
4202 		}
4203 #endif
4204 	default:
4205 		/* TSNH */
4206 		break;
4207 	}
4208 }
4209 
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move then to the new inp.
	 * The entries are staged on a temporary queue so the two inp read
	 * locks are never held at the same time; socket-buffer accounting
	 * is uncharged from the old socket and charged to the new one.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;	/* staging list between the two inps */
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* Serialize against readers of the old socket's receive buffer. */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for out target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* Uncharge each mbuf from the old socket's rcv buffer. */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* Charge each mbuf to the new socket's rcv buffer. */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4285 
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 * 'end' != 0 marks the message complete; 'inp_read_lock_held'
	 * tells us whether the caller already holds the INP read lock.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* Reader side is gone: discard instead of queueing. */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	/* Notifications are not counted in the receive statistics. */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* Walk the chain, pruning zero-length mbufs and charging the sb. */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* Wake up any reader waiting on the socket. */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Take the socket lock; drop the TCB lock
				 * while holding a ref so the assoc can't
				 * disappear underneath us.
				 */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4407 
4408 
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 *
	 * Returns 0 on success, -1 when control is missing/complete or m
	 * is empty.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		/* Reader side is gone; silently drop the append. */
		SCTP_INP_READ_UNLOCK(inp);
		return 0;
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* Prune zero-length mbufs and charge the rest to the sockbuf. */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	/* Splice the new chain onto the control's existing data. */
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	/* Wake up any reader waiting on the socket. */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4554 
4555 
4556 
4557 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4558  *************ALTERNATE ROUTING CODE
4559  */
4560 
4561 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4562  *************ALTERNATE ROUTING CODE
4563  */
4564 
4565 struct mbuf *
4566 sctp_generate_invmanparam(int err)
4567 {
4568 	/* Return a MBUF with a invalid mandatory parameter */
4569 	struct mbuf *m;
4570 
4571 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4572 	if (m) {
4573 		struct sctp_paramhdr *ph;
4574 
4575 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4576 		ph = mtod(m, struct sctp_paramhdr *);
4577 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4578 		ph->param_type = htons(err);
4579 	}
4580 	return (m);
4581 }
4582 
4583 #ifdef SCTP_MBCNT_LOGGING
4584 void
4585 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4586     struct sctp_tmit_chunk *tp1, int chk_cnt)
4587 {
4588 	if (tp1->data == NULL) {
4589 		return;
4590 	}
4591 	asoc->chunks_on_out_queue -= chk_cnt;
4592 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4593 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4594 		    asoc->total_output_queue_size,
4595 		    tp1->book_size,
4596 		    0,
4597 		    tp1->mbcnt);
4598 	}
4599 	if (asoc->total_output_queue_size >= tp1->book_size) {
4600 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4601 	} else {
4602 		asoc->total_output_queue_size = 0;
4603 	}
4604 
4605 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4606 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4607 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4608 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4609 		} else {
4610 			stcb->sctp_socket->so_snd.sb_cc = 0;
4611 
4612 		}
4613 	}
4614 }
4615 
4616 #endif
4617 
4618 int
4619 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4620     int reason, int so_locked
4621 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4622     SCTP_UNUSED
4623 #endif
4624 )
4625 {
4626 	struct sctp_stream_out *strq;
4627 	struct sctp_tmit_chunk *chk = NULL, *tp2;
4628 	struct sctp_stream_queue_pending *sp;
4629 	uint16_t stream = 0, seq = 0;
4630 	uint8_t foundeom = 0;
4631 	int ret_sz = 0;
4632 	int notdone;
4633 	int do_wakeup_routine = 0;
4634 
4635 	stream = tp1->rec.data.stream_number;
4636 	seq = tp1->rec.data.stream_seq;
4637 	do {
4638 		ret_sz += tp1->book_size;
4639 		if (tp1->data != NULL) {
4640 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4641 				sctp_flight_size_decrease(tp1);
4642 				sctp_total_flight_decrease(stcb, tp1);
4643 			}
4644 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4645 			stcb->asoc.peers_rwnd += tp1->send_size;
4646 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4647 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4648 			if (tp1->data) {
4649 				sctp_m_freem(tp1->data);
4650 				tp1->data = NULL;
4651 			}
4652 			do_wakeup_routine = 1;
4653 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4654 				stcb->asoc.sent_queue_cnt_removeable--;
4655 			}
4656 		}
4657 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4658 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4659 		    SCTP_DATA_NOT_FRAG) {
4660 			/* not frag'ed we ae done   */
4661 			notdone = 0;
4662 			foundeom = 1;
4663 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4664 			/* end of frag, we are done */
4665 			notdone = 0;
4666 			foundeom = 1;
4667 		} else {
4668 			/*
4669 			 * Its a begin or middle piece, we must mark all of
4670 			 * it
4671 			 */
4672 			notdone = 1;
4673 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4674 		}
4675 	} while (tp1 && notdone);
4676 	if (foundeom == 0) {
4677 		/*
4678 		 * The multi-part message was scattered across the send and
4679 		 * sent queue.
4680 		 */
4681 		TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) {
4682 			if ((tp1->rec.data.stream_number != stream) ||
4683 			    (tp1->rec.data.stream_seq != seq)) {
4684 				break;
4685 			}
4686 			/*
4687 			 * save to chk in case we have some on stream out
4688 			 * queue. If so and we have an un-transmitted one we
4689 			 * don't have to fudge the TSN.
4690 			 */
4691 			chk = tp1;
4692 			ret_sz += tp1->book_size;
4693 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4694 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4695 			if (tp1->data) {
4696 				sctp_m_freem(tp1->data);
4697 				tp1->data = NULL;
4698 			}
4699 			/* No flight involved here book the size to 0 */
4700 			tp1->book_size = 0;
4701 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4702 				foundeom = 1;
4703 			}
4704 			do_wakeup_routine = 1;
4705 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4706 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4707 			/*
4708 			 * on to the sent queue so we can wait for it to be
4709 			 * passed by.
4710 			 */
4711 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4712 			    sctp_next);
4713 			stcb->asoc.send_queue_cnt--;
4714 			stcb->asoc.sent_queue_cnt++;
4715 		}
4716 	}
4717 	if (foundeom == 0) {
4718 		/*
4719 		 * Still no eom found. That means there is stuff left on the
4720 		 * stream out queue.. yuck.
4721 		 */
4722 		strq = &stcb->asoc.strmout[stream];
4723 		SCTP_TCB_SEND_LOCK(stcb);
4724 		TAILQ_FOREACH(sp, &strq->outqueue, next) {
4725 			/* FIXME: Shouldn't this be a serial number check? */
4726 			if (sp->strseq > seq) {
4727 				break;
4728 			}
4729 			/* Check if its our SEQ */
4730 			if (sp->strseq == seq) {
4731 				sp->discard_rest = 1;
4732 				/*
4733 				 * We may need to put a chunk on the queue
4734 				 * that holds the TSN that would have been
4735 				 * sent with the LAST bit.
4736 				 */
4737 				if (chk == NULL) {
4738 					/* Yep, we have to */
4739 					sctp_alloc_a_chunk(stcb, chk);
4740 					if (chk == NULL) {
4741 						/*
4742 						 * we are hosed. All we can
4743 						 * do is nothing.. which
4744 						 * will cause an abort if
4745 						 * the peer is paying
4746 						 * attention.
4747 						 */
4748 						goto oh_well;
4749 					}
4750 					memset(chk, 0, sizeof(*chk));
4751 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4752 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4753 					chk->asoc = &stcb->asoc;
4754 					chk->rec.data.stream_seq = sp->strseq;
4755 					chk->rec.data.stream_number = sp->stream;
4756 					chk->rec.data.payloadtype = sp->ppid;
4757 					chk->rec.data.context = sp->context;
4758 					chk->flags = sp->act_flags;
4759 					if (sp->net)
4760 						chk->whoTo = sp->net;
4761 					else
4762 						chk->whoTo = stcb->asoc.primary_destination;
4763 					atomic_add_int(&chk->whoTo->ref_count, 1);
4764 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4765 					stcb->asoc.pr_sctp_cnt++;
4766 					chk->pr_sctp_on = 1;
4767 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4768 					stcb->asoc.sent_queue_cnt++;
4769 					stcb->asoc.pr_sctp_cnt++;
4770 				} else {
4771 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4772 				}
4773 		oh_well:
4774 				if (sp->data) {
4775 					/*
4776 					 * Pull any data to free up the SB
4777 					 * and allow sender to "add more"
4778 					 * whilc we will throw away :-)
4779 					 */
4780 					sctp_free_spbufspace(stcb, &stcb->asoc,
4781 					    sp);
4782 					ret_sz += sp->length;
4783 					do_wakeup_routine = 1;
4784 					sp->some_taken = 1;
4785 					sctp_m_freem(sp->data);
4786 					sp->length = 0;
4787 					sp->data = NULL;
4788 					sp->tail_mbuf = NULL;
4789 				}
4790 				break;
4791 			}
4792 		}		/* End tailq_foreach */
4793 		SCTP_TCB_SEND_UNLOCK(stcb);
4794 	}
4795 	if (do_wakeup_routine) {
4796 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4797 		struct socket *so;
4798 
4799 		so = SCTP_INP_SO(stcb->sctp_ep);
4800 		if (!so_locked) {
4801 			atomic_add_int(&stcb->asoc.refcnt, 1);
4802 			SCTP_TCB_UNLOCK(stcb);
4803 			SCTP_SOCKET_LOCK(so, 1);
4804 			SCTP_TCB_LOCK(stcb);
4805 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4806 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4807 				/* assoc was freed while we were unlocked */
4808 				SCTP_SOCKET_UNLOCK(so, 1);
4809 				return (ret_sz);
4810 			}
4811 		}
4812 #endif
4813 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4814 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4815 		if (!so_locked) {
4816 			SCTP_SOCKET_UNLOCK(so, 1);
4817 		}
4818 #endif
4819 	}
4820 	return (ret_sz);
4821 }
4822 
4823 /*
4824  * checks to see if the given address, sa, is one that is currently known by
4825  * the kernel note: can't distinguish the same address on multiple interfaces
4826  * and doesn't handle multiple addresses with different zone/scope id's note:
4827  * ifa_ifwithaddr() compares the entire sockaddr struct
4828  */
4829 struct sctp_ifa *
4830 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4831     int holds_lock)
4832 {
4833 	struct sctp_laddr *laddr;
4834 
4835 	if (holds_lock == 0) {
4836 		SCTP_INP_RLOCK(inp);
4837 	}
4838 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4839 		if (laddr->ifa == NULL)
4840 			continue;
4841 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4842 			continue;
4843 		if (addr->sa_family == AF_INET) {
4844 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4845 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4846 				/* found him. */
4847 				if (holds_lock == 0) {
4848 					SCTP_INP_RUNLOCK(inp);
4849 				}
4850 				return (laddr->ifa);
4851 				break;
4852 			}
4853 		}
4854 #ifdef INET6
4855 		if (addr->sa_family == AF_INET6) {
4856 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4857 			    &laddr->ifa->address.sin6)) {
4858 				/* found him. */
4859 				if (holds_lock == 0) {
4860 					SCTP_INP_RUNLOCK(inp);
4861 				}
4862 				return (laddr->ifa);
4863 				break;
4864 			}
4865 		}
4866 #endif
4867 	}
4868 	if (holds_lock == 0) {
4869 		SCTP_INP_RUNLOCK(inp);
4870 	}
4871 	return (NULL);
4872 }
4873 
4874 uint32_t
4875 sctp_get_ifa_hash_val(struct sockaddr *addr)
4876 {
4877 	if (addr->sa_family == AF_INET) {
4878 		struct sockaddr_in *sin;
4879 
4880 		sin = (struct sockaddr_in *)addr;
4881 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4882 	} else if (addr->sa_family == AF_INET6) {
4883 		struct sockaddr_in6 *sin6;
4884 		uint32_t hash_of_addr;
4885 
4886 		sin6 = (struct sockaddr_in6 *)addr;
4887 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4888 		    sin6->sin6_addr.s6_addr32[1] +
4889 		    sin6->sin6_addr.s6_addr32[2] +
4890 		    sin6->sin6_addr.s6_addr32[3]);
4891 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4892 		return (hash_of_addr);
4893 	}
4894 	return (0);
4895 }
4896 
4897 struct sctp_ifa *
4898 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4899 {
4900 	struct sctp_ifa *sctp_ifap;
4901 	struct sctp_vrf *vrf;
4902 	struct sctp_ifalist *hash_head;
4903 	uint32_t hash_of_addr;
4904 
4905 	if (holds_lock == 0)
4906 		SCTP_IPI_ADDR_RLOCK();
4907 
4908 	vrf = sctp_find_vrf(vrf_id);
4909 	if (vrf == NULL) {
4910 stage_right:
4911 		if (holds_lock == 0)
4912 			SCTP_IPI_ADDR_RUNLOCK();
4913 		return (NULL);
4914 	}
4915 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4916 
4917 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4918 	if (hash_head == NULL) {
4919 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4920 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4921 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4922 		sctp_print_address(addr);
4923 		SCTP_PRINTF("No such bucket for address\n");
4924 		if (holds_lock == 0)
4925 			SCTP_IPI_ADDR_RUNLOCK();
4926 
4927 		return (NULL);
4928 	}
4929 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4930 		if (sctp_ifap == NULL) {
4931 #ifdef INVARIANTS
4932 			panic("Huh LIST_FOREACH corrupt");
4933 			goto stage_right;
4934 #else
4935 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4936 			goto stage_right;
4937 #endif
4938 		}
4939 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4940 			continue;
4941 		if (addr->sa_family == AF_INET) {
4942 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4943 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4944 				/* found him. */
4945 				if (holds_lock == 0)
4946 					SCTP_IPI_ADDR_RUNLOCK();
4947 				return (sctp_ifap);
4948 				break;
4949 			}
4950 		}
4951 #ifdef INET6
4952 		if (addr->sa_family == AF_INET6) {
4953 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4954 			    &sctp_ifap->address.sin6)) {
4955 				/* found him. */
4956 				if (holds_lock == 0)
4957 					SCTP_IPI_ADDR_RUNLOCK();
4958 				return (sctp_ifap);
4959 				break;
4960 			}
4961 		}
4962 #endif
4963 	}
4964 	if (holds_lock == 0)
4965 		SCTP_IPI_ADDR_RUNLOCK();
4966 	return (NULL);
4967 }
4968 
/*
 * Called after the user has pulled data off the socket, to decide whether
 * the peer should be told about newly opened receive-window space.  If the
 * window has grown by at least rwnd_req bytes since the last report, drop
 * the read-queue lock (if held), take the TCB lock, and send a window-update
 * SACK immediately; otherwise just remember the pending delta in the tcb.
 *
 * stcb         - association being read from; NULL is a no-op.
 * freed_so_far - in/out: bytes freed by the reader since the last call.
 *                Folded into stcb->freed_by_sorcv_sincelast and reset to 0.
 * hold_rlock   - non-zero if the caller holds the inp read-queue lock; it
 *                is released around the SACK send and re-acquired on exit.
 * rwnd_req     - threshold (bytes) of window growth that triggers a SACK.
 *
 * A refcount on the association is held for the duration so the assoc
 * cannot be freed out from under us; the ABOUT_TO_BE_FREED state is
 * re-checked after each lock transition to close the race with teardown.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;	/* set once the read-queue lock has been dropped */
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Pin the association so it cannot be freed while we work. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		/* Socket is (all)gone; no one to report to. */
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Fold the caller's freed count into the tcb's running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window grew enough: worth sending a window-update SACK. */
		if (hold_rlock) {
			/* Can't hold the read-queue lock across the send. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check now that we own the TCB lock. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Restore the read-queue lock if the caller expects it held. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Drop the association reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5050 
5051 int
5052 sctp_sorecvmsg(struct socket *so,
5053     struct uio *uio,
5054     struct mbuf **mp,
5055     struct sockaddr *from,
5056     int fromlen,
5057     int *msg_flags,
5058     struct sctp_sndrcvinfo *sinfo,
5059     int filling_sinfo)
5060 {
5061 	/*
5062 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5063 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5064 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5065 	 * On the way out we may send out any combination of:
5066 	 * MSG_NOTIFICATION MSG_EOR
5067 	 *
5068 	 */
5069 	struct sctp_inpcb *inp = NULL;
5070 	int my_len = 0;
5071 	int cp_len = 0, error = 0;
5072 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5073 	struct mbuf *m = NULL;
5074 	struct sctp_tcb *stcb = NULL;
5075 	int wakeup_read_socket = 0;
5076 	int freecnt_applied = 0;
5077 	int out_flags = 0, in_flags = 0;
5078 	int block_allowed = 1;
5079 	uint32_t freed_so_far = 0;
5080 	uint32_t copied_so_far = 0;
5081 	int in_eeor_mode = 0;
5082 	int no_rcv_needed = 0;
5083 	uint32_t rwnd_req = 0;
5084 	int hold_sblock = 0;
5085 	int hold_rlock = 0;
5086 	int slen = 0;
5087 	uint32_t held_length = 0;
5088 	int sockbuf_lock = 0;
5089 
5090 	if (uio == NULL) {
5091 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5092 		return (EINVAL);
5093 	}
5094 	if (msg_flags) {
5095 		in_flags = *msg_flags;
5096 		if (in_flags & MSG_PEEK)
5097 			SCTP_STAT_INCR(sctps_read_peeks);
5098 	} else {
5099 		in_flags = 0;
5100 	}
5101 	slen = uio->uio_resid;
5102 
5103 	/* Pull in and set up our int flags */
5104 	if (in_flags & MSG_OOB) {
5105 		/* Out of band's NOT supported */
5106 		return (EOPNOTSUPP);
5107 	}
5108 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5109 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5110 		return (EINVAL);
5111 	}
5112 	if ((in_flags & (MSG_DONTWAIT
5113 	    | MSG_NBIO
5114 	    )) ||
5115 	    SCTP_SO_IS_NBIO(so)) {
5116 		block_allowed = 0;
5117 	}
5118 	/* setup the endpoint */
5119 	inp = (struct sctp_inpcb *)so->so_pcb;
5120 	if (inp == NULL) {
5121 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5122 		return (EFAULT);
5123 	}
5124 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5125 	/* Must be at least a MTU's worth */
5126 	if (rwnd_req < SCTP_MIN_RWND)
5127 		rwnd_req = SCTP_MIN_RWND;
5128 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5129 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5130 		sctp_misc_ints(SCTP_SORECV_ENTER,
5131 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5132 	}
5133 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5134 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5135 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5136 	}
5137 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5138 	sockbuf_lock = 1;
5139 	if (error) {
5140 		goto release_unlocked;
5141 	}
5142 restart:
5143 
5144 
5145 restart_nosblocks:
5146 	if (hold_sblock == 0) {
5147 		SOCKBUF_LOCK(&so->so_rcv);
5148 		hold_sblock = 1;
5149 	}
5150 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5151 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5152 		goto out;
5153 	}
5154 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5155 		if (so->so_error) {
5156 			error = so->so_error;
5157 			if ((in_flags & MSG_PEEK) == 0)
5158 				so->so_error = 0;
5159 			goto out;
5160 		} else {
5161 			if (so->so_rcv.sb_cc == 0) {
5162 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5163 				/* indicate EOF */
5164 				error = 0;
5165 				goto out;
5166 			}
5167 		}
5168 	}
5169 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5170 		/* we need to wait for data */
5171 		if ((so->so_rcv.sb_cc == 0) &&
5172 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5173 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5174 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5175 				/*
5176 				 * For active open side clear flags for
5177 				 * re-use passive open is blocked by
5178 				 * connect.
5179 				 */
5180 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5181 					/*
5182 					 * You were aborted, passive side
5183 					 * always hits here
5184 					 */
5185 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5186 					error = ECONNRESET;
5187 					/*
5188 					 * You get this once if you are
5189 					 * active open side
5190 					 */
5191 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5192 						/*
5193 						 * Remove flag if on the
5194 						 * active open side
5195 						 */
5196 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5197 					}
5198 				}
5199 				so->so_state &= ~(SS_ISCONNECTING |
5200 				    SS_ISDISCONNECTING |
5201 				    SS_ISCONFIRMING |
5202 				    SS_ISCONNECTED);
5203 				if (error == 0) {
5204 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5205 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5206 						error = ENOTCONN;
5207 					} else {
5208 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5209 					}
5210 				}
5211 				goto out;
5212 			}
5213 		}
5214 		error = sbwait(&so->so_rcv);
5215 		if (error) {
5216 			goto out;
5217 		}
5218 		held_length = 0;
5219 		goto restart_nosblocks;
5220 	} else if (so->so_rcv.sb_cc == 0) {
5221 		if (so->so_error) {
5222 			error = so->so_error;
5223 			if ((in_flags & MSG_PEEK) == 0)
5224 				so->so_error = 0;
5225 		} else {
5226 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5227 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5228 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5229 					/*
5230 					 * For active open side clear flags
5231 					 * for re-use passive open is
5232 					 * blocked by connect.
5233 					 */
5234 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5235 						/*
5236 						 * You were aborted, passive
5237 						 * side always hits here
5238 						 */
5239 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5240 						error = ECONNRESET;
5241 						/*
5242 						 * You get this once if you
5243 						 * are active open side
5244 						 */
5245 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5246 							/*
5247 							 * Remove flag if on
5248 							 * the active open
5249 							 * side
5250 							 */
5251 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5252 						}
5253 					}
5254 					so->so_state &= ~(SS_ISCONNECTING |
5255 					    SS_ISDISCONNECTING |
5256 					    SS_ISCONFIRMING |
5257 					    SS_ISCONNECTED);
5258 					if (error == 0) {
5259 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5260 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5261 							error = ENOTCONN;
5262 						} else {
5263 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5264 						}
5265 					}
5266 					goto out;
5267 				}
5268 			}
5269 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5270 			error = EWOULDBLOCK;
5271 		}
5272 		goto out;
5273 	}
5274 	if (hold_sblock == 1) {
5275 		SOCKBUF_UNLOCK(&so->so_rcv);
5276 		hold_sblock = 0;
5277 	}
5278 	/* we possibly have data we can read */
5279 	/* sa_ignore FREED_MEMORY */
5280 	control = TAILQ_FIRST(&inp->read_queue);
5281 	if (control == NULL) {
5282 		/*
5283 		 * This could be happening since the appender did the
5284 		 * increment but as not yet did the tailq insert onto the
5285 		 * read_queue
5286 		 */
5287 		if (hold_rlock == 0) {
5288 			SCTP_INP_READ_LOCK(inp);
5289 			hold_rlock = 1;
5290 		}
5291 		control = TAILQ_FIRST(&inp->read_queue);
5292 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5293 #ifdef INVARIANTS
5294 			panic("Huh, its non zero and nothing on control?");
5295 #endif
5296 			so->so_rcv.sb_cc = 0;
5297 		}
5298 		SCTP_INP_READ_UNLOCK(inp);
5299 		hold_rlock = 0;
5300 		goto restart;
5301 	}
5302 	if ((control->length == 0) &&
5303 	    (control->do_not_ref_stcb)) {
5304 		/*
5305 		 * Clean up code for freeing assoc that left behind a
5306 		 * pdapi.. maybe a peer in EEOR that just closed after
5307 		 * sending and never indicated a EOR.
5308 		 */
5309 		if (hold_rlock == 0) {
5310 			hold_rlock = 1;
5311 			SCTP_INP_READ_LOCK(inp);
5312 		}
5313 		control->held_length = 0;
5314 		if (control->data) {
5315 			/* Hmm there is data here .. fix */
5316 			struct mbuf *m_tmp;
5317 			int cnt = 0;
5318 
5319 			m_tmp = control->data;
5320 			while (m_tmp) {
5321 				cnt += SCTP_BUF_LEN(m_tmp);
5322 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5323 					control->tail_mbuf = m_tmp;
5324 					control->end_added = 1;
5325 				}
5326 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5327 			}
5328 			control->length = cnt;
5329 		} else {
5330 			/* remove it */
5331 			TAILQ_REMOVE(&inp->read_queue, control, next);
5332 			/* Add back any hiddend data */
5333 			sctp_free_remote_addr(control->whoFrom);
5334 			sctp_free_a_readq(stcb, control);
5335 		}
5336 		if (hold_rlock) {
5337 			hold_rlock = 0;
5338 			SCTP_INP_READ_UNLOCK(inp);
5339 		}
5340 		goto restart;
5341 	}
5342 	if ((control->length == 0) &&
5343 	    (control->end_added == 1)) {
5344 		/*
5345 		 * Do we also need to check for (control->pdapi_aborted ==
5346 		 * 1)?
5347 		 */
5348 		if (hold_rlock == 0) {
5349 			hold_rlock = 1;
5350 			SCTP_INP_READ_LOCK(inp);
5351 		}
5352 		TAILQ_REMOVE(&inp->read_queue, control, next);
5353 		if (control->data) {
5354 #ifdef INVARIANTS
5355 			panic("control->data not null but control->length == 0");
5356 #else
5357 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5358 			sctp_m_freem(control->data);
5359 			control->data = NULL;
5360 #endif
5361 		}
5362 		if (control->aux_data) {
5363 			sctp_m_free(control->aux_data);
5364 			control->aux_data = NULL;
5365 		}
5366 		sctp_free_remote_addr(control->whoFrom);
5367 		sctp_free_a_readq(stcb, control);
5368 		if (hold_rlock) {
5369 			hold_rlock = 0;
5370 			SCTP_INP_READ_UNLOCK(inp);
5371 		}
5372 		goto restart;
5373 	}
5374 	if (control->length == 0) {
5375 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5376 		    (filling_sinfo)) {
5377 			/* find a more suitable one then this */
5378 			ctl = TAILQ_NEXT(control, next);
5379 			while (ctl) {
5380 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5381 				    (ctl->some_taken ||
5382 				    (ctl->spec_flags & M_NOTIFICATION) ||
5383 				    ((ctl->do_not_ref_stcb == 0) &&
5384 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5385 				    ) {
5386 					/*-
5387 					 * If we have a different TCB next, and there is data
5388 					 * present. If we have already taken some (pdapi), OR we can
5389 					 * ref the tcb and no delivery as started on this stream, we
5390 					 * take it. Note we allow a notification on a different
5391 					 * assoc to be delivered..
5392 					 */
5393 					control = ctl;
5394 					goto found_one;
5395 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5396 					    (ctl->length) &&
5397 					    ((ctl->some_taken) ||
5398 					    ((ctl->do_not_ref_stcb == 0) &&
5399 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5400 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
5401 					/*-
5402 					 * If we have the same tcb, and there is data present, and we
5403 					 * have the strm interleave feature present. Then if we have
5404 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5405 					 * not started a delivery for this stream, we can take it.
5406 					 * Note we do NOT allow a notificaiton on the same assoc to
5407 					 * be delivered.
5408 					 */
5409 					control = ctl;
5410 					goto found_one;
5411 				}
5412 				ctl = TAILQ_NEXT(ctl, next);
5413 			}
5414 		}
5415 		/*
5416 		 * if we reach here, not suitable replacement is available
5417 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5418 		 * into the our held count, and its time to sleep again.
5419 		 */
5420 		held_length = so->so_rcv.sb_cc;
5421 		control->held_length = so->so_rcv.sb_cc;
5422 		goto restart;
5423 	}
5424 	/* Clear the held length since there is something to read */
5425 	control->held_length = 0;
5426 	if (hold_rlock) {
5427 		SCTP_INP_READ_UNLOCK(inp);
5428 		hold_rlock = 0;
5429 	}
5430 found_one:
5431 	/*
5432 	 * If we reach here, control has a some data for us to read off.
5433 	 * Note that stcb COULD be NULL.
5434 	 */
5435 	control->some_taken++;
5436 	if (hold_sblock) {
5437 		SOCKBUF_UNLOCK(&so->so_rcv);
5438 		hold_sblock = 0;
5439 	}
5440 	stcb = control->stcb;
5441 	if (stcb) {
5442 		if ((control->do_not_ref_stcb == 0) &&
5443 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5444 			if (freecnt_applied == 0)
5445 				stcb = NULL;
5446 		} else if (control->do_not_ref_stcb == 0) {
5447 			/* you can't free it on me please */
5448 			/*
5449 			 * The lock on the socket buffer protects us so the
5450 			 * free code will stop. But since we used the
5451 			 * socketbuf lock and the sender uses the tcb_lock
5452 			 * to increment, we need to use the atomic add to
5453 			 * the refcnt
5454 			 */
5455 			if (freecnt_applied) {
5456 #ifdef INVARIANTS
5457 				panic("refcnt already incremented");
5458 #else
5459 				printf("refcnt already incremented?\n");
5460 #endif
5461 			} else {
5462 				atomic_add_int(&stcb->asoc.refcnt, 1);
5463 				freecnt_applied = 1;
5464 			}
5465 			/*
5466 			 * Setup to remember how much we have not yet told
5467 			 * the peer our rwnd has opened up. Note we grab the
5468 			 * value from the tcb from last time. Note too that
5469 			 * sack sending clears this when a sack is sent,
5470 			 * which is fine. Once we hit the rwnd_req, we then
5471 			 * will go to the sctp_user_rcvd() that will not
5472 			 * lock until it KNOWs it MUST send a WUP-SACK.
5473 			 */
5474 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5475 			stcb->freed_by_sorcv_sincelast = 0;
5476 		}
5477 	}
5478 	if (stcb &&
5479 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5480 	    control->do_not_ref_stcb == 0) {
5481 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5482 	}
5483 	/* First lets get off the sinfo and sockaddr info */
5484 	if ((sinfo) && filling_sinfo) {
5485 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5486 		nxt = TAILQ_NEXT(control, next);
5487 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5488 			struct sctp_extrcvinfo *s_extra;
5489 
5490 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5491 			if ((nxt) &&
5492 			    (nxt->length)) {
5493 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5494 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5495 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5496 				}
5497 				if (nxt->spec_flags & M_NOTIFICATION) {
5498 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5499 				}
5500 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5501 				s_extra->sreinfo_next_length = nxt->length;
5502 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5503 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5504 				if (nxt->tail_mbuf != NULL) {
5505 					if (nxt->end_added) {
5506 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5507 					}
5508 				}
5509 			} else {
5510 				/*
5511 				 * we explicitly 0 this, since the memcpy
5512 				 * got some other things beyond the older
5513 				 * sinfo_ that is on the control's structure
5514 				 * :-D
5515 				 */
5516 				nxt = NULL;
5517 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5518 				s_extra->sreinfo_next_aid = 0;
5519 				s_extra->sreinfo_next_length = 0;
5520 				s_extra->sreinfo_next_ppid = 0;
5521 				s_extra->sreinfo_next_stream = 0;
5522 			}
5523 		}
5524 		/*
5525 		 * update off the real current cum-ack, if we have an stcb.
5526 		 */
5527 		if ((control->do_not_ref_stcb == 0) && stcb)
5528 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5529 		/*
5530 		 * mask off the high bits, we keep the actual chunk bits in
5531 		 * there.
5532 		 */
5533 		sinfo->sinfo_flags &= 0x00ff;
5534 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5535 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5536 		}
5537 	}
5538 #ifdef SCTP_ASOCLOG_OF_TSNS
5539 	{
5540 		int index, newindex;
5541 		struct sctp_pcbtsn_rlog *entry;
5542 
5543 		do {
5544 			index = inp->readlog_index;
5545 			newindex = index + 1;
5546 			if (newindex >= SCTP_READ_LOG_SIZE) {
5547 				newindex = 0;
5548 			}
5549 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5550 		entry = &inp->readlog[index];
5551 		entry->vtag = control->sinfo_assoc_id;
5552 		entry->strm = control->sinfo_stream;
5553 		entry->seq = control->sinfo_ssn;
5554 		entry->sz = control->length;
5555 		entry->flgs = control->sinfo_flags;
5556 	}
5557 #endif
5558 	if (fromlen && from) {
5559 		struct sockaddr *to;
5560 
5561 #ifdef INET
5562 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5563 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5564 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5565 #else
5566 		/* No AF_INET use AF_INET6 */
5567 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5568 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5569 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5570 #endif
5571 
5572 		to = from;
5573 #if defined(INET) && defined(INET6)
5574 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5575 		    (to->sa_family == AF_INET) &&
5576 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5577 			struct sockaddr_in *sin;
5578 			struct sockaddr_in6 sin6;
5579 
5580 			sin = (struct sockaddr_in *)to;
5581 			bzero(&sin6, sizeof(sin6));
5582 			sin6.sin6_family = AF_INET6;
5583 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5584 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5585 			bcopy(&sin->sin_addr,
5586 			    &sin6.sin6_addr.s6_addr32[3],
5587 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5588 			sin6.sin6_port = sin->sin_port;
5589 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5590 		}
5591 #endif
5592 #if defined(INET6)
5593 		{
5594 			struct sockaddr_in6 lsa6, *to6;
5595 
5596 			to6 = (struct sockaddr_in6 *)to;
5597 			sctp_recover_scope_mac(to6, (&lsa6));
5598 		}
5599 #endif
5600 	}
5601 	/* now copy out what data we can */
5602 	if (mp == NULL) {
5603 		/* copy out each mbuf in the chain up to length */
5604 get_more_data:
5605 		m = control->data;
5606 		while (m) {
5607 			/* Move out all we can */
5608 			cp_len = (int)uio->uio_resid;
5609 			my_len = (int)SCTP_BUF_LEN(m);
5610 			if (cp_len > my_len) {
5611 				/* not enough in this buf */
5612 				cp_len = my_len;
5613 			}
5614 			if (hold_rlock) {
5615 				SCTP_INP_READ_UNLOCK(inp);
5616 				hold_rlock = 0;
5617 			}
5618 			if (cp_len > 0)
5619 				error = uiomove(mtod(m, char *), cp_len, uio);
5620 			/* re-read */
5621 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5622 				goto release;
5623 			}
5624 			if ((control->do_not_ref_stcb == 0) && stcb &&
5625 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5626 				no_rcv_needed = 1;
5627 			}
5628 			if (error) {
5629 				/* error we are out of here */
5630 				goto release;
5631 			}
5632 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5633 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5634 			    ((control->end_added == 0) ||
5635 			    (control->end_added &&
5636 			    (TAILQ_NEXT(control, next) == NULL)))
5637 			    ) {
5638 				SCTP_INP_READ_LOCK(inp);
5639 				hold_rlock = 1;
5640 			}
5641 			if (cp_len == SCTP_BUF_LEN(m)) {
5642 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5643 				    (control->end_added)) {
5644 					out_flags |= MSG_EOR;
5645 					if ((control->do_not_ref_stcb == 0) &&
5646 					    (control->stcb != NULL) &&
5647 					    ((control->spec_flags & M_NOTIFICATION) == 0))
5648 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5649 				}
5650 				if (control->spec_flags & M_NOTIFICATION) {
5651 					out_flags |= MSG_NOTIFICATION;
5652 				}
5653 				/* we ate up the mbuf */
5654 				if (in_flags & MSG_PEEK) {
5655 					/* just looking */
5656 					m = SCTP_BUF_NEXT(m);
5657 					copied_so_far += cp_len;
5658 				} else {
5659 					/* dispose of the mbuf */
5660 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5661 						sctp_sblog(&so->so_rcv,
5662 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5663 					}
5664 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5665 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5666 						sctp_sblog(&so->so_rcv,
5667 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5668 					}
5669 					copied_so_far += cp_len;
5670 					freed_so_far += cp_len;
5671 					freed_so_far += MSIZE;
5672 					atomic_subtract_int(&control->length, cp_len);
5673 					control->data = sctp_m_free(m);
5674 					m = control->data;
5675 					/*
5676 					 * been through it all, must hold sb
5677 					 * lock ok to null tail
5678 					 */
5679 					if (control->data == NULL) {
5680 #ifdef INVARIANTS
5681 						if ((control->end_added == 0) ||
5682 						    (TAILQ_NEXT(control, next) == NULL)) {
5683 							/*
5684 							 * If the end is not
5685 							 * added, OR the
5686 							 * next is NOT null
5687 							 * we MUST have the
5688 							 * lock.
5689 							 */
5690 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5691 								panic("Hmm we don't own the lock?");
5692 							}
5693 						}
5694 #endif
5695 						control->tail_mbuf = NULL;
5696 #ifdef INVARIANTS
5697 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5698 							panic("end_added, nothing left and no MSG_EOR");
5699 						}
5700 #endif
5701 					}
5702 				}
5703 			} else {
5704 				/* Do we need to trim the mbuf? */
5705 				if (control->spec_flags & M_NOTIFICATION) {
5706 					out_flags |= MSG_NOTIFICATION;
5707 				}
5708 				if ((in_flags & MSG_PEEK) == 0) {
5709 					SCTP_BUF_RESV_UF(m, cp_len);
5710 					SCTP_BUF_LEN(m) -= cp_len;
5711 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5712 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5713 					}
5714 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5715 					if ((control->do_not_ref_stcb == 0) &&
5716 					    stcb) {
5717 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5718 					}
5719 					copied_so_far += cp_len;
5720 					freed_so_far += cp_len;
5721 					freed_so_far += MSIZE;
5722 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5723 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5724 						    SCTP_LOG_SBRESULT, 0);
5725 					}
5726 					atomic_subtract_int(&control->length, cp_len);
5727 				} else {
5728 					copied_so_far += cp_len;
5729 				}
5730 			}
5731 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5732 				break;
5733 			}
5734 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5735 			    (control->do_not_ref_stcb == 0) &&
5736 			    (freed_so_far >= rwnd_req)) {
5737 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5738 			}
5739 		}		/* end while(m) */
5740 		/*
5741 		 * At this point we have looked at it all and we either have
5742 		 * a MSG_EOR/or read all the user wants... <OR>
5743 		 * control->length == 0.
5744 		 */
5745 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5746 			/* we are done with this control */
5747 			if (control->length == 0) {
5748 				if (control->data) {
5749 #ifdef INVARIANTS
5750 					panic("control->data not null at read eor?");
5751 #else
5752 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5753 					sctp_m_freem(control->data);
5754 					control->data = NULL;
5755 #endif
5756 				}
5757 		done_with_control:
5758 				if (TAILQ_NEXT(control, next) == NULL) {
5759 					/*
5760 					 * If we don't have a next we need a
5761 					 * lock, if there is a next
5762 					 * interrupt is filling ahead of us
5763 					 * and we don't need a lock to
5764 					 * remove this guy (which is the
5765 					 * head of the queue).
5766 					 */
5767 					if (hold_rlock == 0) {
5768 						SCTP_INP_READ_LOCK(inp);
5769 						hold_rlock = 1;
5770 					}
5771 				}
5772 				TAILQ_REMOVE(&inp->read_queue, control, next);
5773 				/* Add back any hiddend data */
5774 				if (control->held_length) {
5775 					held_length = 0;
5776 					control->held_length = 0;
5777 					wakeup_read_socket = 1;
5778 				}
5779 				if (control->aux_data) {
5780 					sctp_m_free(control->aux_data);
5781 					control->aux_data = NULL;
5782 				}
5783 				no_rcv_needed = control->do_not_ref_stcb;
5784 				sctp_free_remote_addr(control->whoFrom);
5785 				control->data = NULL;
5786 				sctp_free_a_readq(stcb, control);
5787 				control = NULL;
5788 				if ((freed_so_far >= rwnd_req) &&
5789 				    (no_rcv_needed == 0))
5790 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5791 
5792 			} else {
5793 				/*
5794 				 * The user did not read all of this
5795 				 * message, turn off the returned MSG_EOR
5796 				 * since we are leaving more behind on the
5797 				 * control to read.
5798 				 */
5799 #ifdef INVARIANTS
5800 				if (control->end_added &&
5801 				    (control->data == NULL) &&
5802 				    (control->tail_mbuf == NULL)) {
5803 					panic("Gak, control->length is corrupt?");
5804 				}
5805 #endif
5806 				no_rcv_needed = control->do_not_ref_stcb;
5807 				out_flags &= ~MSG_EOR;
5808 			}
5809 		}
5810 		if (out_flags & MSG_EOR) {
5811 			goto release;
5812 		}
5813 		if ((uio->uio_resid == 0) ||
5814 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5815 		    ) {
5816 			goto release;
5817 		}
5818 		/*
5819 		 * If I hit here the receiver wants more and this message is
5820 		 * NOT done (pd-api). So two questions. Can we block? if not
5821 		 * we are done. Did the user NOT set MSG_WAITALL?
5822 		 */
5823 		if (block_allowed == 0) {
5824 			goto release;
5825 		}
5826 		/*
5827 		 * We need to wait for more data a few things: - We don't
5828 		 * sbunlock() so we don't get someone else reading. - We
5829 		 * must be sure to account for the case where what is added
5830 		 * is NOT to our control when we wakeup.
5831 		 */
5832 
5833 		/*
5834 		 * Do we need to tell the transport a rwnd update might be
5835 		 * needed before we go to sleep?
5836 		 */
5837 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5838 		    ((freed_so_far >= rwnd_req) &&
5839 		    (control->do_not_ref_stcb == 0) &&
5840 		    (no_rcv_needed == 0))) {
5841 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5842 		}
5843 wait_some_more:
5844 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5845 			goto release;
5846 		}
5847 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5848 			goto release;
5849 
5850 		if (hold_rlock == 1) {
5851 			SCTP_INP_READ_UNLOCK(inp);
5852 			hold_rlock = 0;
5853 		}
5854 		if (hold_sblock == 0) {
5855 			SOCKBUF_LOCK(&so->so_rcv);
5856 			hold_sblock = 1;
5857 		}
5858 		if ((copied_so_far) && (control->length == 0) &&
5859 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
5860 			goto release;
5861 		}
5862 		if (so->so_rcv.sb_cc <= control->held_length) {
5863 			error = sbwait(&so->so_rcv);
5864 			if (error) {
5865 				goto release;
5866 			}
5867 			control->held_length = 0;
5868 		}
5869 		if (hold_sblock) {
5870 			SOCKBUF_UNLOCK(&so->so_rcv);
5871 			hold_sblock = 0;
5872 		}
5873 		if (control->length == 0) {
5874 			/* still nothing here */
5875 			if (control->end_added == 1) {
5876 				/* he aborted, or is done i.e.did a shutdown */
5877 				out_flags |= MSG_EOR;
5878 				if (control->pdapi_aborted) {
5879 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5880 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5881 
5882 					out_flags |= MSG_TRUNC;
5883 				} else {
5884 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5885 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5886 				}
5887 				goto done_with_control;
5888 			}
5889 			if (so->so_rcv.sb_cc > held_length) {
5890 				control->held_length = so->so_rcv.sb_cc;
5891 				held_length = 0;
5892 			}
5893 			goto wait_some_more;
5894 		} else if (control->data == NULL) {
5895 			/*
5896 			 * we must re-sync since data is probably being
5897 			 * added
5898 			 */
5899 			SCTP_INP_READ_LOCK(inp);
5900 			if ((control->length > 0) && (control->data == NULL)) {
5901 				/*
5902 				 * big trouble.. we have the lock and its
5903 				 * corrupt?
5904 				 */
5905 #ifdef INVARIANTS
5906 				panic("Impossible data==NULL length !=0");
5907 #endif
5908 				out_flags |= MSG_EOR;
5909 				out_flags |= MSG_TRUNC;
5910 				control->length = 0;
5911 				SCTP_INP_READ_UNLOCK(inp);
5912 				goto done_with_control;
5913 			}
5914 			SCTP_INP_READ_UNLOCK(inp);
5915 			/* We will fall around to get more data */
5916 		}
5917 		goto get_more_data;
5918 	} else {
5919 		/*-
5920 		 * Give caller back the mbuf chain,
5921 		 * store in uio_resid the length
5922 		 */
5923 		wakeup_read_socket = 0;
5924 		if ((control->end_added == 0) ||
5925 		    (TAILQ_NEXT(control, next) == NULL)) {
5926 			/* Need to get rlock */
5927 			if (hold_rlock == 0) {
5928 				SCTP_INP_READ_LOCK(inp);
5929 				hold_rlock = 1;
5930 			}
5931 		}
5932 		if (control->end_added) {
5933 			out_flags |= MSG_EOR;
5934 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5935 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5936 		}
5937 		if (control->spec_flags & M_NOTIFICATION) {
5938 			out_flags |= MSG_NOTIFICATION;
5939 		}
5940 		uio->uio_resid = control->length;
5941 		*mp = control->data;
5942 		m = control->data;
5943 		while (m) {
5944 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5945 				sctp_sblog(&so->so_rcv,
5946 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5947 			}
5948 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5949 			freed_so_far += SCTP_BUF_LEN(m);
5950 			freed_so_far += MSIZE;
5951 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5952 				sctp_sblog(&so->so_rcv,
5953 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5954 			}
5955 			m = SCTP_BUF_NEXT(m);
5956 		}
5957 		control->data = control->tail_mbuf = NULL;
5958 		control->length = 0;
5959 		if (out_flags & MSG_EOR) {
5960 			/* Done with this control */
5961 			goto done_with_control;
5962 		}
5963 	}
5964 release:
5965 	if (hold_rlock == 1) {
5966 		SCTP_INP_READ_UNLOCK(inp);
5967 		hold_rlock = 0;
5968 	}
5969 	if (hold_sblock == 1) {
5970 		SOCKBUF_UNLOCK(&so->so_rcv);
5971 		hold_sblock = 0;
5972 	}
5973 	sbunlock(&so->so_rcv);
5974 	sockbuf_lock = 0;
5975 
5976 release_unlocked:
5977 	if (hold_sblock) {
5978 		SOCKBUF_UNLOCK(&so->so_rcv);
5979 		hold_sblock = 0;
5980 	}
5981 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
5982 		if ((freed_so_far >= rwnd_req) &&
5983 		    (control && (control->do_not_ref_stcb == 0)) &&
5984 		    (no_rcv_needed == 0))
5985 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5986 	}
5987 out:
5988 	if (msg_flags) {
5989 		*msg_flags = out_flags;
5990 	}
5991 	if (((out_flags & MSG_EOR) == 0) &&
5992 	    ((in_flags & MSG_PEEK) == 0) &&
5993 	    (sinfo) &&
5994 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
5995 		struct sctp_extrcvinfo *s_extra;
5996 
5997 		s_extra = (struct sctp_extrcvinfo *)sinfo;
5998 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5999 	}
6000 	if (hold_rlock == 1) {
6001 		SCTP_INP_READ_UNLOCK(inp);
6002 		hold_rlock = 0;
6003 	}
6004 	if (hold_sblock) {
6005 		SOCKBUF_UNLOCK(&so->so_rcv);
6006 		hold_sblock = 0;
6007 	}
6008 	if (sockbuf_lock) {
6009 		sbunlock(&so->so_rcv);
6010 	}
6011 	if (freecnt_applied) {
6012 		/*
6013 		 * The lock on the socket buffer protects us so the free
6014 		 * code will stop. But since we used the socketbuf lock and
6015 		 * the sender uses the tcb_lock to increment, we need to use
6016 		 * the atomic add to the refcnt.
6017 		 */
6018 		if (stcb == NULL) {
6019 #ifdef INVARIANTS
6020 			panic("stcb for refcnt has gone NULL?");
6021 			goto stage_left;
6022 #else
6023 			goto stage_left;
6024 #endif
6025 		}
6026 		atomic_add_int(&stcb->asoc.refcnt, -1);
6027 		freecnt_applied = 0;
6028 		/* Save the value back for next time */
6029 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6030 	}
6031 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6032 		if (stcb) {
6033 			sctp_misc_ints(SCTP_SORECV_DONE,
6034 			    freed_so_far,
6035 			    ((uio) ? (slen - uio->uio_resid) : slen),
6036 			    stcb->asoc.my_rwnd,
6037 			    so->so_rcv.sb_cc);
6038 		} else {
6039 			sctp_misc_ints(SCTP_SORECV_DONE,
6040 			    freed_so_far,
6041 			    ((uio) ? (slen - uio->uio_resid) : slen),
6042 			    0,
6043 			    so->so_rcv.sb_cc);
6044 		}
6045 	}
6046 stage_left:
6047 	if (wakeup_read_socket) {
6048 		sctp_sorwakeup(inp, so);
6049 	}
6050 	return (error);
6051 }
6052 
6053 
6054 #ifdef SCTP_MBUF_LOGGING
6055 struct mbuf *
6056 sctp_m_free(struct mbuf *m)
6057 {
6058 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6059 		if (SCTP_BUF_IS_EXTENDED(m)) {
6060 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6061 		}
6062 	}
6063 	return (m_free(m));
6064 }
6065 
6066 void
6067 sctp_m_freem(struct mbuf *mb)
6068 {
6069 	while (mb != NULL)
6070 		mb = sctp_m_free(mb);
6071 }
6072 
6073 #endif
6074 
6075 int
6076 sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
6077 {
6078 	/*
6079 	 * Given a local address. For all associations that holds the
6080 	 * address, request a peer-set-primary.
6081 	 */
6082 	struct sctp_ifa *ifa;
6083 	struct sctp_laddr *wi;
6084 
6085 	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
6086 	if (ifa == NULL) {
6087 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
6088 		return (EADDRNOTAVAIL);
6089 	}
6090 	/*
6091 	 * Now that we have the ifa we must awaken the iterator with this
6092 	 * message.
6093 	 */
6094 	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
6095 	if (wi == NULL) {
6096 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
6097 		return (ENOMEM);
6098 	}
6099 	/* Now incr the count and int wi structure */
6100 	SCTP_INCR_LADDR_COUNT();
6101 	bzero(wi, sizeof(*wi));
6102 	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
6103 	wi->ifa = ifa;
6104 	wi->action = SCTP_SET_PRIM_ADDR;
6105 	atomic_add_int(&ifa->refcount, 1);
6106 
6107 	/* Now add it to the work queue */
6108 	SCTP_WQ_ADDR_LOCK();
6109 	/*
6110 	 * Should this really be a tailq? As it is we will process the
6111 	 * newest first :-0
6112 	 */
6113 	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
6114 	SCTP_WQ_ADDR_UNLOCK();
6115 	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
6116 	    (struct sctp_inpcb *)NULL,
6117 	    (struct sctp_tcb *)NULL,
6118 	    (struct sctp_nets *)NULL);
6119 	return (0);
6120 }
6121 
6122 
6123 int
6124 sctp_soreceive(struct socket *so,
6125     struct sockaddr **psa,
6126     struct uio *uio,
6127     struct mbuf **mp0,
6128     struct mbuf **controlp,
6129     int *flagsp)
6130 {
6131 	int error, fromlen;
6132 	uint8_t sockbuf[256];
6133 	struct sockaddr *from;
6134 	struct sctp_extrcvinfo sinfo;
6135 	int filling_sinfo = 1;
6136 	struct sctp_inpcb *inp;
6137 
6138 	inp = (struct sctp_inpcb *)so->so_pcb;
6139 	/* pickup the assoc we are reading from */
6140 	if (inp == NULL) {
6141 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6142 		return (EINVAL);
6143 	}
6144 	if ((sctp_is_feature_off(inp,
6145 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6146 	    (controlp == NULL)) {
6147 		/* user does not want the sndrcv ctl */
6148 		filling_sinfo = 0;
6149 	}
6150 	if (psa) {
6151 		from = (struct sockaddr *)sockbuf;
6152 		fromlen = sizeof(sockbuf);
6153 		from->sa_len = 0;
6154 	} else {
6155 		from = NULL;
6156 		fromlen = 0;
6157 	}
6158 
6159 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6160 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6161 	if ((controlp) && (filling_sinfo)) {
6162 		/* copy back the sinfo in a CMSG format */
6163 		if (filling_sinfo)
6164 			*controlp = sctp_build_ctl_nchunk(inp,
6165 			    (struct sctp_sndrcvinfo *)&sinfo);
6166 		else
6167 			*controlp = NULL;
6168 	}
6169 	if (psa) {
6170 		/* copy back the address info */
6171 		if (from && from->sa_len) {
6172 			*psa = sodupsockaddr(from, M_NOWAIT);
6173 		} else {
6174 			*psa = NULL;
6175 		}
6176 	}
6177 	return (error);
6178 }
6179 
6180 
6181 int
6182 sctp_l_soreceive(struct socket *so,
6183     struct sockaddr **name,
6184     struct uio *uio,
6185     char **controlp,
6186     int *controllen,
6187     int *flag)
6188 {
6189 	int error, fromlen;
6190 	uint8_t sockbuf[256];
6191 	struct sockaddr *from;
6192 	struct sctp_extrcvinfo sinfo;
6193 	int filling_sinfo = 1;
6194 	struct sctp_inpcb *inp;
6195 
6196 	inp = (struct sctp_inpcb *)so->so_pcb;
6197 	/* pickup the assoc we are reading from */
6198 	if (inp == NULL) {
6199 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6200 		return (EINVAL);
6201 	}
6202 	if ((sctp_is_feature_off(inp,
6203 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6204 	    (controlp == NULL)) {
6205 		/* user does not want the sndrcv ctl */
6206 		filling_sinfo = 0;
6207 	}
6208 	if (name) {
6209 		from = (struct sockaddr *)sockbuf;
6210 		fromlen = sizeof(sockbuf);
6211 		from->sa_len = 0;
6212 	} else {
6213 		from = NULL;
6214 		fromlen = 0;
6215 	}
6216 
6217 	error = sctp_sorecvmsg(so, uio,
6218 	    (struct mbuf **)NULL,
6219 	    from, fromlen, flag,
6220 	    (struct sctp_sndrcvinfo *)&sinfo,
6221 	    filling_sinfo);
6222 	if ((controlp) && (filling_sinfo)) {
6223 		/*
6224 		 * copy back the sinfo in a CMSG format note that the caller
6225 		 * has reponsibility for freeing the memory.
6226 		 */
6227 		if (filling_sinfo)
6228 			*controlp = sctp_build_ctl_cchunk(inp,
6229 			    controllen,
6230 			    (struct sctp_sndrcvinfo *)&sinfo);
6231 	}
6232 	if (name) {
6233 		/* copy back the address info */
6234 		if (from && from->sa_len) {
6235 			*name = sodupsockaddr(from, M_WAIT);
6236 		} else {
6237 			*name = NULL;
6238 		}
6239 	}
6240 	return (error);
6241 }
6242 
6243 
6244 
6245 
6246 
6247 
6248 
6249 int
6250 sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
6251     int totaddr, int *error)
6252 {
6253 	int added = 0;
6254 	int i;
6255 	struct sctp_inpcb *inp;
6256 	struct sockaddr *sa;
6257 	size_t incr = 0;
6258 
6259 	sa = addr;
6260 	inp = stcb->sctp_ep;
6261 	*error = 0;
6262 	for (i = 0; i < totaddr; i++) {
6263 		if (sa->sa_family == AF_INET) {
6264 			incr = sizeof(struct sockaddr_in);
6265 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6266 				/* assoc gone no un-lock */
6267 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6268 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
6269 				*error = ENOBUFS;
6270 				goto out_now;
6271 			}
6272 			added++;
6273 		} else if (sa->sa_family == AF_INET6) {
6274 			incr = sizeof(struct sockaddr_in6);
6275 			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
6276 				/* assoc gone no un-lock */
6277 				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
6278 				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
6279 				*error = ENOBUFS;
6280 				goto out_now;
6281 			}
6282 			added++;
6283 		}
6284 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6285 	}
6286 out_now:
6287 	return (added);
6288 }
6289 
6290 struct sctp_tcb *
6291 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6292     int *totaddr, int *num_v4, int *num_v6, int *error,
6293     int limit, int *bad_addr)
6294 {
6295 	struct sockaddr *sa;
6296 	struct sctp_tcb *stcb = NULL;
6297 	size_t incr, at, i;
6298 
6299 	at = incr = 0;
6300 	sa = addr;
6301 	*error = *num_v6 = *num_v4 = 0;
6302 	/* account and validate addresses */
6303 	for (i = 0; i < (size_t)*totaddr; i++) {
6304 		if (sa->sa_family == AF_INET) {
6305 			(*num_v4) += 1;
6306 			incr = sizeof(struct sockaddr_in);
6307 			if (sa->sa_len != incr) {
6308 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6309 				*error = EINVAL;
6310 				*bad_addr = 1;
6311 				return (NULL);
6312 			}
6313 		} else if (sa->sa_family == AF_INET6) {
6314 			struct sockaddr_in6 *sin6;
6315 
6316 			sin6 = (struct sockaddr_in6 *)sa;
6317 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6318 				/* Must be non-mapped for connectx */
6319 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6320 				*error = EINVAL;
6321 				*bad_addr = 1;
6322 				return (NULL);
6323 			}
6324 			(*num_v6) += 1;
6325 			incr = sizeof(struct sockaddr_in6);
6326 			if (sa->sa_len != incr) {
6327 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6328 				*error = EINVAL;
6329 				*bad_addr = 1;
6330 				return (NULL);
6331 			}
6332 		} else {
6333 			*totaddr = i;
6334 			/* we are done */
6335 			break;
6336 		}
6337 		SCTP_INP_INCR_REF(inp);
6338 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6339 		if (stcb != NULL) {
6340 			/* Already have or am bring up an association */
6341 			return (stcb);
6342 		} else {
6343 			SCTP_INP_DECR_REF(inp);
6344 		}
6345 		if ((at + incr) > (size_t)limit) {
6346 			*totaddr = i;
6347 			break;
6348 		}
6349 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6350 	}
6351 	return ((struct sctp_tcb *)NULL);
6352 }
6353 
6354 /*
6355  * sctp_bindx(ADD) for one address.
6356  * assumes all arguments are valid/checked by caller.
6357  */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	/* scratch storage for converting a v4-mapped v6 address to v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* address must be exactly sockaddr_in6 sized */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* strip the mapping; treat as plain IPv4 from here on */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		/* address must be exactly sockaddr_in sized */
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* endpoint has no address yet: this is a plain first bind */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		/* check whether some other endpoint already owns this addr:port */
		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* address is free; add it (port zeroed for the mgmt call) */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6479 
6480 /*
6481  * sctp_bindx(DELETE) for one address.
6482  * assumes all arguments are valid/checked by caller.
6483  */
void
sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	/* scratch storage for converting a v4-mapped v6 address to v4 */
	struct sockaddr_in sin;

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* address must be exactly sockaddr_in6 sized */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* strip the mapping; treat as plain IPv4 from here on */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
	if (sa->sa_family == AF_INET) {
		/* address must be exactly sockaddr_in sized */
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
6562 
6563 /*
6564  * returns the valid local address count for an assoc, taking into account
6565  * all scoping rules
6566  */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
	int ipv4_addr_legal, ipv6_addr_legal;
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.loopback_scope;
	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
	local_scope = stcb->asoc.local_scope;
	site_scope = stcb->asoc.site_scope;
	ipv4_addr_legal = ipv6_addr_legal = 0;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		ipv6_addr_legal = 1;
		/* a v6 endpoint may also use v4 unless it is v6-only */
		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
			ipv4_addr_legal = 1;
		}
	} else {
		ipv4_addr_legal = 1;
	}

	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			/* skip loopback interfaces when loopback scope is off */
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				/* addresses restricted for this assoc don't count */
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						/* private addrs need ipv4 local scope */
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							/* link-locals need local scope */
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						/* site-locals need site scope */
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case: count the endpoint's bound addresses
		 * that are not restricted for this association
		 */
		struct sctp_laddr *laddr;

		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}
6698 
6699 #if defined(SCTP_LOCAL_TRACE_BUF)
6700 
6701 void
6702 sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
6703 {
6704 	uint32_t saveindex, newindex;
6705 
6706 	do {
6707 		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
6708 		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6709 			newindex = 1;
6710 		} else {
6711 			newindex = saveindex + 1;
6712 		}
6713 	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
6714 	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
6715 		saveindex = 0;
6716 	}
6717 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
6718 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
6719 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
6720 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
6721 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
6722 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
6723 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
6724 	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
6725 }
6726 
6727 #endif
6728 /* We will need to add support
6729  * to bind the ports and such here
6730  * so we can do UDP tunneling. In
6731  * the mean-time, we return error
6732  */
6733 #include <netinet/udp.h>
6734 #include <netinet/udp_var.h>
6735 #include <sys/proc.h>
6736 #ifdef INET6
6737 #include <netinet6/sctp6_var.h>
6738 #endif
6739 
6740 static void
6741 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6742 {
6743 	struct ip *iph;
6744 	struct mbuf *sp, *last;
6745 	struct udphdr *uhdr;
6746 	uint16_t port = 0, len;
6747 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6748 
6749 	/*
6750 	 * Split out the mbuf chain. Leave the IP header in m, place the
6751 	 * rest in the sp.
6752 	 */
6753 	if ((m->m_flags & M_PKTHDR) == 0) {
6754 		/* Can't handle one that is not a pkt hdr */
6755 		goto out;
6756 	}
6757 	/* pull the src port */
6758 	iph = mtod(m, struct ip *);
6759 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6760 
6761 	port = uhdr->uh_sport;
6762 	sp = m_split(m, off, M_DONTWAIT);
6763 	if (sp == NULL) {
6764 		/* Gak, drop packet, we can't do a split */
6765 		goto out;
6766 	}
6767 	if (sp->m_pkthdr.len < header_size) {
6768 		/* Gak, packet can't have an SCTP header in it - to small */
6769 		m_freem(sp);
6770 		goto out;
6771 	}
6772 	/* ok now pull up the UDP header and SCTP header together */
6773 	sp = m_pullup(sp, header_size);
6774 	if (sp == NULL) {
6775 		/* Gak pullup failed */
6776 		goto out;
6777 	}
6778 	/* trim out the UDP header */
6779 	m_adj(sp, sizeof(struct udphdr));
6780 
6781 	/* Now reconstruct the mbuf chain */
6782 	/* 1) find last one */
6783 	last = m;
6784 	while (last->m_next != NULL) {
6785 		last = last->m_next;
6786 	}
6787 	last->m_next = sp;
6788 	m->m_pkthdr.len += sp->m_pkthdr.len;
6789 	last = m;
6790 	while (last != NULL) {
6791 		last = last->m_next;
6792 	}
6793 	/* Now its ready for sctp_input or sctp6_input */
6794 	iph = mtod(m, struct ip *);
6795 	switch (iph->ip_v) {
6796 	case IPVERSION:
6797 		{
6798 			/* its IPv4 */
6799 			len = SCTP_GET_IPV4_LENGTH(iph);
6800 			len -= sizeof(struct udphdr);
6801 			SCTP_GET_IPV4_LENGTH(iph) = len;
6802 			sctp_input_with_port(m, off, port);
6803 			break;
6804 		}
6805 #ifdef INET6
6806 	case IPV6_VERSION >> 4:
6807 		{
6808 			/* its IPv6 - NOT supported */
6809 			goto out;
6810 			break;
6811 
6812 		}
6813 #endif
6814 	default:
6815 		{
6816 			m_freem(m);
6817 			break;
6818 		}
6819 	}
6820 	return;
6821 out:
6822 	m_freem(m);
6823 }
6824 
6825 void
6826 sctp_over_udp_stop(void)
6827 {
6828 	struct socket *sop;
6829 
6830 	/*
6831 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6832 	 * for writting!
6833 	 */
6834 	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
6835 		/* Nothing to do */
6836 		return;
6837 	}
6838 	sop = SCTP_BASE_INFO(udp_tun_socket);
6839 	soclose(sop);
6840 	SCTP_BASE_INFO(udp_tun_socket) = NULL;
6841 }
6842 int
6843 sctp_over_udp_start(void)
6844 {
6845 	uint16_t port;
6846 	int ret;
6847 	struct sockaddr_in sin;
6848 	struct socket *sop = NULL;
6849 	struct thread *th;
6850 	struct ucred *cred;
6851 
6852 	/*
6853 	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
6854 	 * for writting!
6855 	 */
6856 	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
6857 	if (port == 0) {
6858 		/* Must have a port set */
6859 		return (EINVAL);
6860 	}
6861 	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
6862 		/* Already running -- must stop first */
6863 		return (EALREADY);
6864 	}
6865 	th = curthread;
6866 	cred = th->td_ucred;
6867 	if ((ret = socreate(PF_INET, &sop,
6868 	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
6869 		return (ret);
6870 	}
6871 	SCTP_BASE_INFO(udp_tun_socket) = sop;
6872 	/* call the special UDP hook */
6873 	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
6874 	if (ret) {
6875 		goto exit_stage_left;
6876 	}
6877 	/* Ok we have a socket, bind it to the port */
6878 	memset(&sin, 0, sizeof(sin));
6879 	sin.sin_len = sizeof(sin);
6880 	sin.sin_family = AF_INET;
6881 	sin.sin_port = htons(port);
6882 	ret = sobind(sop, (struct sockaddr *)&sin, th);
6883 	if (ret) {
6884 		/* Close up we cant get the port */
6885 exit_stage_left:
6886 		sctp_over_udp_stop();
6887 		return (ret);
6888 	}
6889 	/*
6890 	 * Ok we should now get UDP packets directly to our input routine
6891 	 * sctp_recv_upd_tunneled_packet().
6892 	 */
6893 	return (0);
6894 }
6895