xref: /freebsd/sys/netinet/sctputil.c (revision 9bd497b8354567454e075076d40c996e21bd6095)
1 /*-
2  * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 /* $KAME: sctputil.c,v 1.37 2005/03/07 23:26:09 itojun Exp $	 */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_pcb.h>
38 #include <netinet/sctputil.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #ifdef INET6
42 #endif
43 #include <netinet/sctp_header.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 #include <netinet/sctp_indata.h>/* for sctp_deliver_data() */
48 #include <netinet/sctp_auth.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_cc_functions.h>
51 
52 #define NUMBER_OF_MTU_SIZES 18
53 
54 
55 #ifndef KTR_SCTP
56 #define KTR_SCTP KTR_SUBSYS
57 #endif
58 
59 void
60 sctp_sblog(struct sockbuf *sb,
61     struct sctp_tcb *stcb, int from, int incr)
62 {
63 	struct sctp_cwnd_log sctp_clog;
64 
65 	sctp_clog.x.sb.stcb = stcb;
66 	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
67 	if (stcb)
68 		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
69 	else
70 		sctp_clog.x.sb.stcb_sbcc = 0;
71 	sctp_clog.x.sb.incr = incr;
72 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
73 	    SCTP_LOG_EVENT_SB,
74 	    from,
75 	    sctp_clog.x.misc.log1,
76 	    sctp_clog.x.misc.log2,
77 	    sctp_clog.x.misc.log3,
78 	    sctp_clog.x.misc.log4);
79 }
80 
81 void
82 sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
83 {
84 	struct sctp_cwnd_log sctp_clog;
85 
86 	sctp_clog.x.close.inp = (void *)inp;
87 	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
88 	if (stcb) {
89 		sctp_clog.x.close.stcb = (void *)stcb;
90 		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
91 	} else {
92 		sctp_clog.x.close.stcb = 0;
93 		sctp_clog.x.close.state = 0;
94 	}
95 	sctp_clog.x.close.loc = loc;
96 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
97 	    SCTP_LOG_EVENT_CLOSE,
98 	    0,
99 	    sctp_clog.x.misc.log1,
100 	    sctp_clog.x.misc.log2,
101 	    sctp_clog.x.misc.log3,
102 	    sctp_clog.x.misc.log4);
103 }
104 
105 
106 void
107 rto_logging(struct sctp_nets *net, int from)
108 {
109 	struct sctp_cwnd_log sctp_clog;
110 
111 	memset(&sctp_clog, 0, sizeof(sctp_clog));
112 	sctp_clog.x.rto.net = (void *)net;
113 	sctp_clog.x.rto.rtt = net->prev_rtt;
114 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
115 	    SCTP_LOG_EVENT_RTT,
116 	    from,
117 	    sctp_clog.x.misc.log1,
118 	    sctp_clog.x.misc.log2,
119 	    sctp_clog.x.misc.log3,
120 	    sctp_clog.x.misc.log4);
121 
122 }
123 
124 void
125 sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
126 {
127 	struct sctp_cwnd_log sctp_clog;
128 
129 	sctp_clog.x.strlog.stcb = stcb;
130 	sctp_clog.x.strlog.n_tsn = tsn;
131 	sctp_clog.x.strlog.n_sseq = sseq;
132 	sctp_clog.x.strlog.e_tsn = 0;
133 	sctp_clog.x.strlog.e_sseq = 0;
134 	sctp_clog.x.strlog.strm = stream;
135 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
136 	    SCTP_LOG_EVENT_STRM,
137 	    from,
138 	    sctp_clog.x.misc.log1,
139 	    sctp_clog.x.misc.log2,
140 	    sctp_clog.x.misc.log3,
141 	    sctp_clog.x.misc.log4);
142 
143 }
144 
145 void
146 sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
147 {
148 	struct sctp_cwnd_log sctp_clog;
149 
150 	sctp_clog.x.nagle.stcb = (void *)stcb;
151 	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
152 	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
153 	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
154 	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
155 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
156 	    SCTP_LOG_EVENT_NAGLE,
157 	    action,
158 	    sctp_clog.x.misc.log1,
159 	    sctp_clog.x.misc.log2,
160 	    sctp_clog.x.misc.log3,
161 	    sctp_clog.x.misc.log4);
162 }
163 
164 
165 void
166 sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
167 {
168 	struct sctp_cwnd_log sctp_clog;
169 
170 	sctp_clog.x.sack.cumack = cumack;
171 	sctp_clog.x.sack.oldcumack = old_cumack;
172 	sctp_clog.x.sack.tsn = tsn;
173 	sctp_clog.x.sack.numGaps = gaps;
174 	sctp_clog.x.sack.numDups = dups;
175 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
176 	    SCTP_LOG_EVENT_SACK,
177 	    from,
178 	    sctp_clog.x.misc.log1,
179 	    sctp_clog.x.misc.log2,
180 	    sctp_clog.x.misc.log3,
181 	    sctp_clog.x.misc.log4);
182 }
183 
184 void
185 sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
186 {
187 	struct sctp_cwnd_log sctp_clog;
188 
189 	memset(&sctp_clog, 0, sizeof(sctp_clog));
190 	sctp_clog.x.map.base = map;
191 	sctp_clog.x.map.cum = cum;
192 	sctp_clog.x.map.high = high;
193 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
194 	    SCTP_LOG_EVENT_MAP,
195 	    from,
196 	    sctp_clog.x.misc.log1,
197 	    sctp_clog.x.misc.log2,
198 	    sctp_clog.x.misc.log3,
199 	    sctp_clog.x.misc.log4);
200 }
201 
202 void
203 sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn,
204     int from)
205 {
206 	struct sctp_cwnd_log sctp_clog;
207 
208 	memset(&sctp_clog, 0, sizeof(sctp_clog));
209 	sctp_clog.x.fr.largest_tsn = biggest_tsn;
210 	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
211 	sctp_clog.x.fr.tsn = tsn;
212 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
213 	    SCTP_LOG_EVENT_FR,
214 	    from,
215 	    sctp_clog.x.misc.log1,
216 	    sctp_clog.x.misc.log2,
217 	    sctp_clog.x.misc.log3,
218 	    sctp_clog.x.misc.log4);
219 
220 }
221 
222 
223 void
224 sctp_log_mb(struct mbuf *m, int from)
225 {
226 	struct sctp_cwnd_log sctp_clog;
227 
228 	sctp_clog.x.mb.mp = m;
229 	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
230 	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
231 	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
232 	if (SCTP_BUF_IS_EXTENDED(m)) {
233 		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
234 		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
235 	} else {
236 		sctp_clog.x.mb.ext = 0;
237 		sctp_clog.x.mb.refcnt = 0;
238 	}
239 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
240 	    SCTP_LOG_EVENT_MBUF,
241 	    from,
242 	    sctp_clog.x.misc.log1,
243 	    sctp_clog.x.misc.log2,
244 	    sctp_clog.x.misc.log3,
245 	    sctp_clog.x.misc.log4);
246 }
247 
248 
249 void
250 sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk,
251     int from)
252 {
253 	struct sctp_cwnd_log sctp_clog;
254 
255 	if (control == NULL) {
256 		SCTP_PRINTF("Gak log of NULL?\n");
257 		return;
258 	}
259 	sctp_clog.x.strlog.stcb = control->stcb;
260 	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
261 	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
262 	sctp_clog.x.strlog.strm = control->sinfo_stream;
263 	if (poschk != NULL) {
264 		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
265 		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
266 	} else {
267 		sctp_clog.x.strlog.e_tsn = 0;
268 		sctp_clog.x.strlog.e_sseq = 0;
269 	}
270 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
271 	    SCTP_LOG_EVENT_STRM,
272 	    from,
273 	    sctp_clog.x.misc.log1,
274 	    sctp_clog.x.misc.log2,
275 	    sctp_clog.x.misc.log3,
276 	    sctp_clog.x.misc.log4);
277 
278 }
279 
280 void
281 sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
282 {
283 	struct sctp_cwnd_log sctp_clog;
284 
285 	sctp_clog.x.cwnd.net = net;
286 	if (stcb->asoc.send_queue_cnt > 255)
287 		sctp_clog.x.cwnd.cnt_in_send = 255;
288 	else
289 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
290 	if (stcb->asoc.stream_queue_cnt > 255)
291 		sctp_clog.x.cwnd.cnt_in_str = 255;
292 	else
293 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
294 
295 	if (net) {
296 		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
297 		sctp_clog.x.cwnd.inflight = net->flight_size;
298 		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
299 		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
300 		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
301 	}
302 	if (SCTP_CWNDLOG_PRESEND == from) {
303 		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
304 	}
305 	sctp_clog.x.cwnd.cwnd_augment = augment;
306 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
307 	    SCTP_LOG_EVENT_CWND,
308 	    from,
309 	    sctp_clog.x.misc.log1,
310 	    sctp_clog.x.misc.log2,
311 	    sctp_clog.x.misc.log3,
312 	    sctp_clog.x.misc.log4);
313 
314 }
315 
316 void
317 sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
318 {
319 	struct sctp_cwnd_log sctp_clog;
320 
321 	memset(&sctp_clog, 0, sizeof(sctp_clog));
322 	if (inp) {
323 		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;
324 
325 	} else {
326 		sctp_clog.x.lock.sock = (void *)NULL;
327 	}
328 	sctp_clog.x.lock.inp = (void *)inp;
329 	if (stcb) {
330 		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
331 	} else {
332 		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
333 	}
334 	if (inp) {
335 		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
336 		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
337 	} else {
338 		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
339 		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
340 	}
341 	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
342 	if (inp->sctp_socket) {
343 		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
344 		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
345 		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
346 	} else {
347 		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
348 		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
349 		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
350 	}
351 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
352 	    SCTP_LOG_LOCK_EVENT,
353 	    from,
354 	    sctp_clog.x.misc.log1,
355 	    sctp_clog.x.misc.log2,
356 	    sctp_clog.x.misc.log3,
357 	    sctp_clog.x.misc.log4);
358 
359 }
360 
361 void
362 sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
363 {
364 	struct sctp_cwnd_log sctp_clog;
365 
366 	memset(&sctp_clog, 0, sizeof(sctp_clog));
367 	sctp_clog.x.cwnd.net = net;
368 	sctp_clog.x.cwnd.cwnd_new_value = error;
369 	sctp_clog.x.cwnd.inflight = net->flight_size;
370 	sctp_clog.x.cwnd.cwnd_augment = burst;
371 	if (stcb->asoc.send_queue_cnt > 255)
372 		sctp_clog.x.cwnd.cnt_in_send = 255;
373 	else
374 		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
375 	if (stcb->asoc.stream_queue_cnt > 255)
376 		sctp_clog.x.cwnd.cnt_in_str = 255;
377 	else
378 		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
379 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
380 	    SCTP_LOG_EVENT_MAXBURST,
381 	    from,
382 	    sctp_clog.x.misc.log1,
383 	    sctp_clog.x.misc.log2,
384 	    sctp_clog.x.misc.log3,
385 	    sctp_clog.x.misc.log4);
386 
387 }
388 
389 void
390 sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
391 {
392 	struct sctp_cwnd_log sctp_clog;
393 
394 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
395 	sctp_clog.x.rwnd.send_size = snd_size;
396 	sctp_clog.x.rwnd.overhead = overhead;
397 	sctp_clog.x.rwnd.new_rwnd = 0;
398 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
399 	    SCTP_LOG_EVENT_RWND,
400 	    from,
401 	    sctp_clog.x.misc.log1,
402 	    sctp_clog.x.misc.log2,
403 	    sctp_clog.x.misc.log3,
404 	    sctp_clog.x.misc.log4);
405 }
406 
407 void
408 sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
409 {
410 	struct sctp_cwnd_log sctp_clog;
411 
412 	sctp_clog.x.rwnd.rwnd = peers_rwnd;
413 	sctp_clog.x.rwnd.send_size = flight_size;
414 	sctp_clog.x.rwnd.overhead = overhead;
415 	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
416 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
417 	    SCTP_LOG_EVENT_RWND,
418 	    from,
419 	    sctp_clog.x.misc.log1,
420 	    sctp_clog.x.misc.log2,
421 	    sctp_clog.x.misc.log3,
422 	    sctp_clog.x.misc.log4);
423 }
424 
425 void
426 sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
427 {
428 	struct sctp_cwnd_log sctp_clog;
429 
430 	sctp_clog.x.mbcnt.total_queue_size = total_oq;
431 	sctp_clog.x.mbcnt.size_change = book;
432 	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
433 	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
434 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
435 	    SCTP_LOG_EVENT_MBCNT,
436 	    from,
437 	    sctp_clog.x.misc.log1,
438 	    sctp_clog.x.misc.log2,
439 	    sctp_clog.x.misc.log3,
440 	    sctp_clog.x.misc.log4);
441 
442 }
443 
/*
 * Log four caller-supplied 32-bit values directly to the KTR trace under
 * SCTP_LOG_MISC_EVENT; "from" identifies the call site.
 */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}
452 
453 void
454 sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t cumtsn, uint32_t wake_cnt, int from)
455 {
456 	struct sctp_cwnd_log sctp_clog;
457 
458 	sctp_clog.x.wake.stcb = (void *)stcb;
459 	sctp_clog.x.wake.wake_cnt = wake_cnt;
460 	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
461 	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
462 	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;
463 
464 	if (stcb->asoc.stream_queue_cnt < 0xff)
465 		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
466 	else
467 		sctp_clog.x.wake.stream_qcnt = 0xff;
468 
469 	if (stcb->asoc.chunks_on_out_queue < 0xff)
470 		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
471 	else
472 		sctp_clog.x.wake.chunks_on_oque = 0xff;
473 
474 	sctp_clog.x.wake.sctpflags = 0;
475 	/* set in the defered mode stuff */
476 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
477 		sctp_clog.x.wake.sctpflags |= 1;
478 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
479 		sctp_clog.x.wake.sctpflags |= 2;
480 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
481 		sctp_clog.x.wake.sctpflags |= 4;
482 	/* what about the sb */
483 	if (stcb->sctp_socket) {
484 		struct socket *so = stcb->sctp_socket;
485 
486 		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
487 	} else {
488 		sctp_clog.x.wake.sbflags = 0xff;
489 	}
490 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
491 	    SCTP_LOG_EVENT_WAKE,
492 	    from,
493 	    sctp_clog.x.misc.log1,
494 	    sctp_clog.x.misc.log2,
495 	    sctp_clog.x.misc.log3,
496 	    sctp_clog.x.misc.log4);
497 
498 }
499 
500 void
501 sctp_log_block(uint8_t from, struct socket *so, struct sctp_association *asoc, int sendlen)
502 {
503 	struct sctp_cwnd_log sctp_clog;
504 
505 	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
506 	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
507 	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
508 	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
509 	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
510 	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
511 	sctp_clog.x.blk.sndlen = sendlen;
512 	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
513 	    SCTP_LOG_EVENT_BLOCK,
514 	    from,
515 	    sctp_clog.x.misc.log1,
516 	    sctp_clog.x.misc.log2,
517 	    sctp_clog.x.misc.log3,
518 	    sctp_clog.x.misc.log4);
519 
520 }
521 
/*
 * Stub for the statistics-log getsockopt path: the trace data is expected
 * to be retrieved via ktrdump instead, so this always reports success
 * without touching optval/optsize.
 */
int
sctp_fill_stat_log(void *optval, size_t *optsize)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}
528 
529 #ifdef SCTP_AUDITING_ENABLED
530 uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
531 static int sctp_audit_indx = 0;
532 
533 static
534 void
535 sctp_print_audit_report(void)
536 {
537 	int i;
538 	int cnt;
539 
540 	cnt = 0;
541 	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
542 		if ((sctp_audit_data[i][0] == 0xe0) &&
543 		    (sctp_audit_data[i][1] == 0x01)) {
544 			cnt = 0;
545 			SCTP_PRINTF("\n");
546 		} else if (sctp_audit_data[i][0] == 0xf0) {
547 			cnt = 0;
548 			SCTP_PRINTF("\n");
549 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
550 		    (sctp_audit_data[i][1] == 0x01)) {
551 			SCTP_PRINTF("\n");
552 			cnt = 0;
553 		}
554 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
555 		    (uint32_t) sctp_audit_data[i][1]);
556 		cnt++;
557 		if ((cnt % 14) == 0)
558 			SCTP_PRINTF("\n");
559 	}
560 	for (i = 0; i < sctp_audit_indx; i++) {
561 		if ((sctp_audit_data[i][0] == 0xe0) &&
562 		    (sctp_audit_data[i][1] == 0x01)) {
563 			cnt = 0;
564 			SCTP_PRINTF("\n");
565 		} else if (sctp_audit_data[i][0] == 0xf0) {
566 			cnt = 0;
567 			SCTP_PRINTF("\n");
568 		} else if ((sctp_audit_data[i][0] == 0xc0) &&
569 		    (sctp_audit_data[i][1] == 0x01)) {
570 			SCTP_PRINTF("\n");
571 			cnt = 0;
572 		}
573 		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
574 		    (uint32_t) sctp_audit_data[i][1]);
575 		cnt++;
576 		if ((cnt % 14) == 0)
577 			SCTP_PRINTF("\n");
578 	}
579 	SCTP_PRINTF("\n");
580 }
581 
582 void
583 sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
584     struct sctp_nets *net)
585 {
586 	int resend_cnt, tot_out, rep, tot_book_cnt;
587 	struct sctp_nets *lnet;
588 	struct sctp_tmit_chunk *chk;
589 
590 	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
591 	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
592 	sctp_audit_indx++;
593 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
594 		sctp_audit_indx = 0;
595 	}
596 	if (inp == NULL) {
597 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
598 		sctp_audit_data[sctp_audit_indx][1] = 0x01;
599 		sctp_audit_indx++;
600 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
601 			sctp_audit_indx = 0;
602 		}
603 		return;
604 	}
605 	if (stcb == NULL) {
606 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
607 		sctp_audit_data[sctp_audit_indx][1] = 0x02;
608 		sctp_audit_indx++;
609 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
610 			sctp_audit_indx = 0;
611 		}
612 		return;
613 	}
614 	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
615 	sctp_audit_data[sctp_audit_indx][1] =
616 	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
617 	sctp_audit_indx++;
618 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
619 		sctp_audit_indx = 0;
620 	}
621 	rep = 0;
622 	tot_book_cnt = 0;
623 	resend_cnt = tot_out = 0;
624 	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
625 		if (chk->sent == SCTP_DATAGRAM_RESEND) {
626 			resend_cnt++;
627 		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
628 			tot_out += chk->book_size;
629 			tot_book_cnt++;
630 		}
631 	}
632 	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
633 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
634 		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
635 		sctp_audit_indx++;
636 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
637 			sctp_audit_indx = 0;
638 		}
639 		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
640 		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
641 		rep = 1;
642 		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
643 		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
644 		sctp_audit_data[sctp_audit_indx][1] =
645 		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
646 		sctp_audit_indx++;
647 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
648 			sctp_audit_indx = 0;
649 		}
650 	}
651 	if (tot_out != stcb->asoc.total_flight) {
652 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
653 		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
654 		sctp_audit_indx++;
655 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
656 			sctp_audit_indx = 0;
657 		}
658 		rep = 1;
659 		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
660 		    (int)stcb->asoc.total_flight);
661 		stcb->asoc.total_flight = tot_out;
662 	}
663 	if (tot_book_cnt != stcb->asoc.total_flight_count) {
664 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
665 		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
666 		sctp_audit_indx++;
667 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
668 			sctp_audit_indx = 0;
669 		}
670 		rep = 1;
671 		SCTP_PRINTF("tot_flt_book:%d\n", tot_book);
672 
673 		stcb->asoc.total_flight_count = tot_book_cnt;
674 	}
675 	tot_out = 0;
676 	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
677 		tot_out += lnet->flight_size;
678 	}
679 	if (tot_out != stcb->asoc.total_flight) {
680 		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
681 		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
682 		sctp_audit_indx++;
683 		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
684 			sctp_audit_indx = 0;
685 		}
686 		rep = 1;
687 		SCTP_PRINTF("real flight:%d net total was %d\n",
688 		    stcb->asoc.total_flight, tot_out);
689 		/* now corrective action */
690 		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
691 
692 			tot_out = 0;
693 			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
694 				if ((chk->whoTo == lnet) &&
695 				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
696 					tot_out += chk->book_size;
697 				}
698 			}
699 			if (lnet->flight_size != tot_out) {
700 				SCTP_PRINTF("net:%x flight was %d corrected to %d\n",
701 				    (uint32_t) lnet, lnet->flight_size,
702 				    tot_out);
703 				lnet->flight_size = tot_out;
704 			}
705 		}
706 	}
707 	if (rep) {
708 		sctp_print_audit_report();
709 	}
710 }
711 
712 void
713 sctp_audit_log(uint8_t ev, uint8_t fd)
714 {
715 
716 	sctp_audit_data[sctp_audit_indx][0] = ev;
717 	sctp_audit_data[sctp_audit_indx][1] = fd;
718 	sctp_audit_indx++;
719 	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
720 		sctp_audit_indx = 0;
721 	}
722 }
723 
724 #endif
725 
726 /*
727  * a list of sizes based on typical mtu's, used only if next hop size not
728  * returned.
729  */
/*
 * NOTE(review): must stay sorted ascending and hold exactly
 * NUMBER_OF_MTU_SIZES (18) entries -- find_next_best_mtu() walks it
 * using that constant.  Values appear to correspond to common link-layer
 * MTUs (e.g. 576 typical minimum path MTU, 1500 Ethernet, 65535 maximum
 * IP datagram) -- confirm before relying on specific entries.
 */
static int sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};
750 
/*
 * Stop the timers that must not fire once an association has entered
 * shutdown: the association-wide heartbeat, delayed-ack, stream-reset,
 * ASCONF, autoclose and delayed-event timers, plus the fast-retransmit
 * and path-MTU timers on every destination address.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	/* per-destination timers */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
	}
}
770 
771 int
772 find_next_best_mtu(int totsz)
773 {
774 	int i, perfer;
775 
776 	/*
777 	 * if we are in here we must find the next best fit based on the
778 	 * size of the dg that failed to be sent.
779 	 */
780 	perfer = 0;
781 	for (i = 0; i < NUMBER_OF_MTU_SIZES; i++) {
782 		if (totsz < sctp_mtu_sizes[i]) {
783 			perfer = i - 1;
784 			if (perfer < 0)
785 				perfer = 0;
786 			break;
787 		}
788 	}
789 	return (sctp_mtu_sizes[perfer]);
790 }
791 
/*
 * Refill the endpoint's pool of pre-computed random bytes by HMAC-ing
 * the endpoint's random seed with a monotonically increasing counter.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	/* restart consumption at the front of the freshly filled store */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	/* bump the counter so the next refill hashes different input */
	m->random_counter++;
}
810 
/*
 * Hand out a 32-bit pseudo-random value from the endpoint's random
 * store, refilling the store when it wraps.  Concurrent callers are
 * serialized lock-free via an atomic compare-and-set on the store index.
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	/* Debug hook: a nonzero seed yields deterministic sequential values. */
	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	/* Reserve a 4-byte slice of the store via CAS on the index. */
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	/* wrap before the final partial word of the store */
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		/* lost the race to another consumer; try again */
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}
848 
849 uint32_t
850 sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int save_in_twait)
851 {
852 	u_long x, not_done;
853 	struct timeval now;
854 
855 	(void)SCTP_GETTIME_TIMEVAL(&now);
856 	not_done = 1;
857 	while (not_done) {
858 		x = sctp_select_initial_TSN(&inp->sctp_ep);
859 		if (x == 0) {
860 			/* we never use 0 */
861 			continue;
862 		}
863 		if (sctp_is_vtag_good(inp, x, lport, rport, &now, save_in_twait)) {
864 			not_done = 0;
865 		}
866 	}
867 	return (x);
868 }
869 
870 int
871 sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
872     int for_a_init, uint32_t override_tag, uint32_t vrf_id)
873 {
874 	struct sctp_association *asoc;
875 
876 	/*
877 	 * Anything set to zero is taken care of by the allocation routine's
878 	 * bzero
879 	 */
880 
881 	/*
882 	 * Up front select what scoping to apply on addresses I tell my peer
883 	 * Not sure what to do with these right now, we will need to come up
884 	 * with a way to set them. We may need to pass them through from the
885 	 * caller in the sctp_aloc_assoc() function.
886 	 */
887 	int i;
888 
889 	asoc = &stcb->asoc;
890 	/* init all variables to a known value. */
891 	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
892 	asoc->max_burst = m->sctp_ep.max_burst;
893 	asoc->heart_beat_delay = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
894 	asoc->cookie_life = m->sctp_ep.def_cookie_life;
895 	asoc->sctp_cmt_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_on_off);
896 	/* EY Init nr_sack variable */
897 	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
898 	/* JRS 5/21/07 - Init CMT PF variables */
899 	asoc->sctp_cmt_pf = (uint8_t) SCTP_BASE_SYSCTL(sctp_cmt_pf);
900 	asoc->sctp_frag_point = m->sctp_frag_point;
901 #ifdef INET
902 	asoc->default_tos = m->ip_inp.inp.inp_ip_tos;
903 #else
904 	asoc->default_tos = 0;
905 #endif
906 
907 #ifdef INET6
908 	asoc->default_flowlabel = ((struct in6pcb *)m)->in6p_flowinfo;
909 #else
910 	asoc->default_flowlabel = 0;
911 #endif
912 	asoc->sb_send_resv = 0;
913 	if (override_tag) {
914 		asoc->my_vtag = override_tag;
915 	} else {
916 		asoc->my_vtag = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
917 	}
918 	/* Get the nonce tags */
919 	asoc->my_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
920 	asoc->peer_vtag_nonce = sctp_select_a_tag(m, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
921 	asoc->vrf_id = vrf_id;
922 
923 	if (sctp_is_feature_on(m, SCTP_PCB_FLAGS_DONOT_HEARTBEAT))
924 		asoc->hb_is_disabled = 1;
925 	else
926 		asoc->hb_is_disabled = 0;
927 
928 #ifdef SCTP_ASOCLOG_OF_TSNS
929 	asoc->tsn_in_at = 0;
930 	asoc->tsn_out_at = 0;
931 	asoc->tsn_in_wrapped = 0;
932 	asoc->tsn_out_wrapped = 0;
933 	asoc->cumack_log_at = 0;
934 	asoc->cumack_log_atsnt = 0;
935 #endif
936 #ifdef SCTP_FS_SPEC_LOG
937 	asoc->fs_index = 0;
938 #endif
939 	asoc->refcnt = 0;
940 	asoc->assoc_up_sent = 0;
941 	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
942 	    sctp_select_initial_TSN(&m->sctp_ep);
943 	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
944 	/* we are optimisitic here */
945 	asoc->peer_supports_pktdrop = 1;
946 	asoc->peer_supports_nat = 0;
947 	asoc->sent_queue_retran_cnt = 0;
948 
949 	/* for CMT */
950 	asoc->last_net_cmt_send_started = NULL;
951 
952 	/* This will need to be adjusted */
953 	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
954 	asoc->last_acked_seq = asoc->init_seq_number - 1;
955 	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
956 	asoc->asconf_seq_in = asoc->last_acked_seq;
957 
958 	/* here we are different, we hold the next one we expect */
959 	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;
960 
961 	asoc->initial_init_rto_max = m->sctp_ep.initial_init_rto_max;
962 	asoc->initial_rto = m->sctp_ep.initial_rto;
963 
964 	asoc->max_init_times = m->sctp_ep.max_init_times;
965 	asoc->max_send_times = m->sctp_ep.max_send_times;
966 	asoc->def_net_failure = m->sctp_ep.def_net_failure;
967 	asoc->free_chunk_cnt = 0;
968 
969 	asoc->iam_blocking = 0;
970 	/* ECN Nonce initialization */
971 	asoc->context = m->sctp_context;
972 	asoc->def_send = m->def_send;
973 	asoc->ecn_nonce_allowed = 0;
974 	asoc->receiver_nonce_sum = 1;
975 	asoc->nonce_sum_expect_base = 1;
976 	asoc->nonce_sum_check = 1;
977 	asoc->nonce_resync_tsn = 0;
978 	asoc->nonce_wait_for_ecne = 0;
979 	asoc->nonce_wait_tsn = 0;
980 	asoc->delayed_ack = TICKS_TO_MSEC(m->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
981 	asoc->sack_freq = m->sctp_ep.sctp_sack_freq;
982 	asoc->pr_sctp_cnt = 0;
983 	asoc->total_output_queue_size = 0;
984 
985 	if (m->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
986 		struct in6pcb *inp6;
987 
988 		/* Its a V6 socket */
989 		inp6 = (struct in6pcb *)m;
990 		asoc->ipv6_addr_legal = 1;
991 		/* Now look at the binding flag to see if V4 will be legal */
992 		if (SCTP_IPV6_V6ONLY(inp6) == 0) {
993 			asoc->ipv4_addr_legal = 1;
994 		} else {
995 			/* V4 addresses are NOT legal on the association */
996 			asoc->ipv4_addr_legal = 0;
997 		}
998 	} else {
999 		/* Its a V4 socket, no - V6 */
1000 		asoc->ipv4_addr_legal = 1;
1001 		asoc->ipv6_addr_legal = 0;
1002 	}
1003 
1004 	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(m->sctp_socket), SCTP_MINIMAL_RWND);
1005 	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(m->sctp_socket);
1006 
1007 	asoc->smallest_mtu = m->sctp_frag_point;
1008 #ifdef SCTP_PRINT_FOR_B_AND_M
1009 	SCTP_PRINTF("smallest_mtu init'd with asoc to :%d\n",
1010 	    asoc->smallest_mtu);
1011 #endif
1012 	asoc->minrto = m->sctp_ep.sctp_minrto;
1013 	asoc->maxrto = m->sctp_ep.sctp_maxrto;
1014 
1015 	asoc->locked_on_sending = NULL;
1016 	asoc->stream_locked_on = 0;
1017 	asoc->ecn_echo_cnt_onq = 0;
1018 	asoc->stream_locked = 0;
1019 
1020 	asoc->send_sack = 1;
1021 
1022 	LIST_INIT(&asoc->sctp_restricted_addrs);
1023 
1024 	TAILQ_INIT(&asoc->nets);
1025 	TAILQ_INIT(&asoc->pending_reply_queue);
1026 	TAILQ_INIT(&asoc->asconf_ack_sent);
1027 	/* Setup to fill the hb random cache at first HB */
1028 	asoc->hb_random_idx = 4;
1029 
1030 	asoc->sctp_autoclose_ticks = m->sctp_ep.auto_close_time;
1031 
1032 	/*
1033 	 * JRS - Pick the default congestion control module based on the
1034 	 * sysctl.
1035 	 */
1036 	switch (m->sctp_ep.sctp_default_cc_module) {
1037 		/* JRS - Standard TCP congestion control */
1038 	case SCTP_CC_RFC2581:
1039 		{
1040 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1041 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1042 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1043 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1044 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1045 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1046 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1047 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1048 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1049 			break;
1050 		}
1051 		/* JRS - High Speed TCP congestion control (Floyd) */
1052 	case SCTP_CC_HSTCP:
1053 		{
1054 			stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
1055 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1056 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
1057 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
1058 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1059 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1060 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1061 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1062 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1063 			break;
1064 		}
1065 		/* JRS - HTCP congestion control */
1066 	case SCTP_CC_HTCP:
1067 		{
1068 			stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
1069 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
1070 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
1071 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
1072 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
1073 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
1074 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1075 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1076 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
1077 			break;
1078 		}
1079 		/* JRS - By default, use RFC2581 */
1080 	default:
1081 		{
1082 			stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
1083 			stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
1084 			stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
1085 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
1086 			stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
1087 			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
1088 			stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
1089 			stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
1090 			stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
1091 			break;
1092 		}
1093 	}
1094 
1095 	/*
1096 	 * Now the stream parameters, here we allocate space for all streams
1097 	 * that we request by default.
1098 	 */
1099 	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
1100 	    m->sctp_ep.pre_open_stream_count;
1101 	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
1102 	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
1103 	    SCTP_M_STRMO);
1104 	if (asoc->strmout == NULL) {
1105 		/* big trouble no memory */
1106 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1107 		return (ENOMEM);
1108 	}
1109 	for (i = 0; i < asoc->streamoutcnt; i++) {
1110 		/*
1111 		 * inbound side must be set to 0xffff, also NOTE when we get
1112 		 * the INIT-ACK back (for INIT sender) we MUST reduce the
1113 		 * count (streamoutcnt) but first check if we sent to any of
1114 		 * the upper streams that were dropped (if some were). Those
1115 		 * that were dropped must be notified to the upper layer as
1116 		 * failed to send.
1117 		 */
1118 		asoc->strmout[i].next_sequence_sent = 0x0;
1119 		TAILQ_INIT(&asoc->strmout[i].outqueue);
1120 		asoc->strmout[i].stream_no = i;
1121 		asoc->strmout[i].last_msg_incomplete = 0;
1122 		asoc->strmout[i].next_spoke.tqe_next = 0;
1123 		asoc->strmout[i].next_spoke.tqe_prev = 0;
1124 	}
1125 	/* Now the mapping array */
1126 	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
1127 	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
1128 	    SCTP_M_MAP);
1129 	if (asoc->mapping_array == NULL) {
1130 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1131 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1132 		return (ENOMEM);
1133 	}
1134 	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
1135 	/* EY  - initialize the nr_mapping_array just like mapping array */
1136 	asoc->nr_mapping_array_size = SCTP_INITIAL_NR_MAPPING_ARRAY;
1137 	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->nr_mapping_array_size,
1138 	    SCTP_M_MAP);
1139 	if (asoc->nr_mapping_array == NULL) {
1140 		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
1141 		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1142 		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
1143 		return (ENOMEM);
1144 	}
1145 	memset(asoc->nr_mapping_array, 0, asoc->nr_mapping_array_size);
1146 
1147 	/* Now the init of the other outqueues */
1148 	TAILQ_INIT(&asoc->free_chunks);
1149 	TAILQ_INIT(&asoc->out_wheel);
1150 	TAILQ_INIT(&asoc->control_send_queue);
1151 	TAILQ_INIT(&asoc->asconf_send_queue);
1152 	TAILQ_INIT(&asoc->send_queue);
1153 	TAILQ_INIT(&asoc->sent_queue);
1154 	TAILQ_INIT(&asoc->reasmqueue);
1155 	TAILQ_INIT(&asoc->resetHead);
1156 	asoc->max_inbound_streams = m->sctp_ep.max_open_streams_intome;
1157 	TAILQ_INIT(&asoc->asconf_queue);
1158 	/* authentication fields */
1159 	asoc->authinfo.random = NULL;
1160 	asoc->authinfo.active_keyid = 0;
1161 	asoc->authinfo.assoc_key = NULL;
1162 	asoc->authinfo.assoc_keyid = 0;
1163 	asoc->authinfo.recv_key = NULL;
1164 	asoc->authinfo.recv_keyid = 0;
1165 	LIST_INIT(&asoc->shared_keys);
1166 	asoc->marked_retrans = 0;
1167 	asoc->timoinit = 0;
1168 	asoc->timodata = 0;
1169 	asoc->timosack = 0;
1170 	asoc->timoshutdown = 0;
1171 	asoc->timoheartbeat = 0;
1172 	asoc->timocookie = 0;
1173 	asoc->timoshutdownack = 0;
1174 	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
1175 	asoc->discontinuity_time = asoc->start_time;
1176 	/*
1177 	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
1178 	 * freed later whe the association is freed.
1179 	 */
1180 	return (0);
1181 }
1182 
1183 int
1184 sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
1185 {
1186 	/* mapping array needs to grow */
1187 	uint8_t *new_array;
1188 	uint32_t new_size;
1189 
1190 	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
1191 	SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1192 	if (new_array == NULL) {
1193 		/* can't get more, forget it */
1194 		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1195 		    new_size);
1196 		return (-1);
1197 	}
1198 	memset(new_array, 0, new_size);
1199 	memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
1200 	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
1201 	asoc->mapping_array = new_array;
1202 	asoc->mapping_array_size = new_size;
1203 	if (asoc->peer_supports_nr_sack) {
1204 		new_size = asoc->nr_mapping_array_size + ((needed + 7) / 8 + SCTP_NR_MAPPING_ARRAY_INCR);
1205 		SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
1206 		if (new_array == NULL) {
1207 			/* can't get more, forget it */
1208 			SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
1209 			    new_size);
1210 			return (-1);
1211 		}
1212 		memset(new_array, 0, new_size);
1213 		memcpy(new_array, asoc->nr_mapping_array, asoc->nr_mapping_array_size);
1214 		SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
1215 		asoc->nr_mapping_array = new_array;
1216 		asoc->nr_mapping_array_size = new_size;
1217 	}
1218 	return (0);
1219 }
1220 
1221 
1222 #if defined(SCTP_USE_THREAD_BASED_ITERATOR)
/*
 * Core of the endpoint/association iterator: walk the global endpoint
 * list starting at it->inp, invoking the iterator's callbacks
 * (function_inp per endpoint, function_assoc per matching association,
 * function_inp_end when an endpoint's associations are exhausted, and
 * function_atend when the whole walk completes).  Frees "it" when the
 * iteration finishes, so the caller must not touch it afterwards.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	SCTP_ITERATOR_LOCK();
	/* drop the reference the timer/scheduler took on the starting inp */
	if (it->inp) {
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	/* skip endpoints whose flags/features don't all match the filter */
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}

	/*
	 * NOTE(review): write lock is dropped and only a read lock is
	 * re-taken for the association walk below; presumably the write
	 * lock was needed only for the flag checks above — confirm.
	 */
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);

	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			/* hold refs so inp/stcb survive the lock release */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);

			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_ITERATOR_LOCK();
			SCTP_INP_RLOCK(it->inp);

			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	/*
	 * NOTE(review): the empty WLOCK/WUNLOCK pair below looks like a
	 * barrier to drain concurrent holders of the inp lock before we
	 * step off this endpoint — TODO confirm intent.
	 */
	SCTP_INP_WLOCK(it->inp);
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}
1349 
1350 void
1351 sctp_iterator_worker(void)
1352 {
1353 	struct sctp_iterator *it = NULL;
1354 
1355 	/* This function is called with the WQ lock in place */
1356 
1357 	SCTP_BASE_INFO(iterator_running) = 1;
1358 again:
1359 	it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1360 	while (it) {
1361 		/* now lets work on this one */
1362 		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
1363 		SCTP_IPI_ITERATOR_WQ_UNLOCK();
1364 		sctp_iterator_work(it);
1365 		SCTP_IPI_ITERATOR_WQ_LOCK();
1366 		/* sa_ignore FREED_MEMORY */
1367 		it = TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead));
1368 	}
1369 	if (TAILQ_FIRST(&SCTP_BASE_INFO(iteratorhead))) {
1370 		goto again;
1371 	}
1372 	SCTP_BASE_INFO(iterator_running) = 0;
1373 	return;
1374 }
1375 
1376 #endif
1377 
1378 
1379 static void
1380 sctp_handle_addr_wq(void)
1381 {
1382 	/* deal with the ADDR wq from the rtsock calls */
1383 	struct sctp_laddr *wi;
1384 	struct sctp_asconf_iterator *asc;
1385 
1386 	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
1387 	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
1388 	if (asc == NULL) {
1389 		/* Try later, no memory */
1390 		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
1391 		    (struct sctp_inpcb *)NULL,
1392 		    (struct sctp_tcb *)NULL,
1393 		    (struct sctp_nets *)NULL);
1394 		return;
1395 	}
1396 	LIST_INIT(&asc->list_of_work);
1397 	asc->cnt = 0;
1398 	SCTP_IPI_ITERATOR_WQ_LOCK();
1399 	wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1400 	while (wi != NULL) {
1401 		LIST_REMOVE(wi, sctp_nxt_addr);
1402 		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
1403 		asc->cnt++;
1404 		wi = LIST_FIRST(&SCTP_BASE_INFO(addr_wq));
1405 	}
1406 	SCTP_IPI_ITERATOR_WQ_UNLOCK();
1407 	if (asc->cnt == 0) {
1408 		SCTP_FREE(asc, SCTP_M_ASC_IT);
1409 	} else {
1410 		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
1411 		    sctp_asconf_iterator_stcb,
1412 		    NULL,	/* No ep end for boundall */
1413 		    SCTP_PCB_FLAGS_BOUNDALL,
1414 		    SCTP_PCB_ANY_FEATURES,
1415 		    SCTP_ASOC_ANY_STATE,
1416 		    (void *)asc, 0,
1417 		    sctp_asconf_iterator_end, NULL, 0);
1418 	}
1419 }
1420 
/*
 * NOTE(review): module-scope scratch written by the T3-rxt case of
 * sctp_timeout_handler() below; they are not protected by any lock,
 * so concurrent timers on different associations may clobber them.
 * They look like debug aids — presumably they should be locals (or at
 * least static); TODO confirm there are no external users before
 * changing linkage.
 */
int retcode = 0;
int cur_oerr = 0;
1423 
/*
 * Single callout routine behind every SCTP timer.  The opaque
 * argument is the struct sctp_timer that fired; its ep/tcb/net fields
 * identify the endpoint, association and destination it belongs to
 * (any may be NULL depending on the timer type).  The function
 * validates the timer, takes the references and locks the individual
 * handlers need, dispatches on tmr->type, and drops everything it
 * acquired on the way out.  The tmr->stopped_from assignments
 * (0xa001..0xa006) are breadcrumbs recording how far validation got,
 * for post-mortem debugging.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;
	struct sctp_iterator *it = NULL;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks... */
	/* self-pointer check guards against a stale/reused timer block */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	/* every timer type except ADDR_WQ requires an endpoint */
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	if (tmr->type == SCTP_TIMER_TYPE_ITERATOR) {
		it = (struct sctp_iterator *)inp;
		inp = NULL;
	}
	/*
	 * Save the type for the final debug print: the INPKILL/ASOCKILL
	 * handlers below free the timer's owner before we get there.
	 */
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * Once the socket is gone, only timers involved in tearing
		 * the endpoint/association down are still serviced.
		 */
		if ((inp->sctp_socket == 0) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		/* hold a refcount while we validate the association state */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* swap the temporary refcount for the TCB lock */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occured */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		SCTP_STAT_INCR(sctps_timoiterator);
		sctp_iterator_timer(it);
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		cur_oerr = stcb->asoc.overall_error_count;
		retcode = sctp_t3rxt_timer(inp, stcb, net);
		if (retcode) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)
		    ) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed SACK timer */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		} {
			int abort_flag;

			SCTP_STAT_INCR(sctps_timosack);
			stcb->asoc.timosack++;
			if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
				sctp_sack_check(stcb, 0, 0, &abort_flag);

			/*
			 * EY if nr_sacks used then send an nr-sack , a sack
			 * otherwise
			 */
			if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
				sctp_send_nr_sack(stcb);
			else
				sctp_send_sack(stcb);
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		{
			struct sctp_nets *lnet;
			int cnt_of_unconf = 0;

			if ((stcb == NULL) || (inp == NULL)) {
				break;
			}
			SCTP_STAT_INCR(sctps_timoheartbeat);
			stcb->asoc.timoheartbeat++;
			/* count reachable-but-unconfirmed destinations */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf == 0) {
				if (sctp_heartbeat_timer(inp, stcb, lnet,
				    cnt_of_unconf)) {
					/* no need to unlock on tcb its gone */
					goto out_decr;
				}
			}
#ifdef SCTP_AUDITING_ENABLED
			sctp_auditing(4, inp, stcb, lnet);
#endif
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT,
			    stcb->sctp_ep, stcb, lnet);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/* rotate the endpoint's cookie secret keys */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		/* guard expired: abort the association outright */
		sctp_abort_an_association(inp, stcb,
		    SCTP_SHUTDOWN_GUARD_EXPIRES, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		/* Need to do FR of things for net */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoearlyfr);
		sctp_early_fr_timer(inp, stcb, net);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/* frees the association; stcb must be NULLed afterwards */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroy's) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		/* frees the endpoint; inp must be NULLed afterwards */
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_DIRECTLY_NOCMPSET);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	};
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	/* release the tcb lock and the inp reference taken above */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}
1910 
/*
 * Arm one of the SCTP timers identified by t_type.  For each type the
 * routine picks the timer object (from the inp, stcb, net, or the base
 * info) and computes its duration in ticks, then starts it via
 * SCTP_OS_TIMER_START.  A timer of the same type that is already
 * pending is left running unchanged.  Callers that pass an stcb must
 * hold its lock (asserted below).
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int to_ticks;
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may run without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	to_ticks = 0;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			/* For this type, "inp" is really an iterator. */
			struct sctp_iterator *it;

			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
			to_ticks = SCTP_ITERATOR_TICKS;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			/* RTO == 0 means no measurement yet; fall back. */
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp
		 * ususually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint8_t this_random;
			int cnt_of_unconf = 0;
			struct sctp_nets *lnet;

			/* Count reachable-but-unconfirmed destinations. */
			TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
				if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
				    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
					cnt_of_unconf++;
				}
			}
			if (cnt_of_unconf) {
				/*
				 * NOTE(review): lnet is forced to NULL here,
				 * so sctp_heartbeat_timer() is deliberately
				 * called with a NULL net -- confirm this is
				 * the intended "pick one yourself" contract.
				 */
				net = lnet = NULL;
				(void)sctp_heartbeat_timer(inp, stcb, lnet, cnt_of_unconf);
			}
			/* Refill the 4-byte random pool when exhausted. */
			if (stcb->asoc.hb_random_idx > 3) {
				rndval = sctp_select_initial_TSN(&inp->sctp_ep);
				memcpy(stcb->asoc.hb_random_values, &rndval,
				    sizeof(stcb->asoc.hb_random_values));
				stcb->asoc.hb_random_idx = 0;
			}
			this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
			stcb->asoc.hb_random_idx++;
			stcb->asoc.hb_ect_randombit = 0;
			/*
			 * this_random will be 0 - 256 ms RTO is in ms.
			 */
			if ((stcb->asoc.hb_is_disabled) &&
			    (cnt_of_unconf == 0)) {
				return;
			}
			if (net) {
				int delay;

				delay = stcb->asoc.heart_beat_delay;
				/*
				 * Any reachable, in-scope, unconfirmed
				 * destination gets heartbeated immediately.
				 */
				TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
					if ((lnet->dest_state & SCTP_ADDR_UNCONFIRMED) &&
					    ((lnet->dest_state & SCTP_ADDR_OUT_OF_SCOPE) == 0) &&
					    (lnet->dest_state & SCTP_ADDR_REACHABLE)) {
						delay = 0;
					}
				}
				if (net->RTO == 0) {
					/* Never been checked */
					to_ticks = this_random + stcb->asoc.initial_rto + delay;
				} else {
					/* set rto_val to the ms */
					to_ticks = delay + net->RTO + this_random;
				}
			} else {
				if (cnt_of_unconf) {
					to_ticks = this_random + stcb->asoc.initial_rto;
				} else {
					to_ticks = stcb->asoc.heart_beat_delay + this_random + stcb->asoc.initial_rto;
				}
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &stcb->asoc.hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was compelete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here ususually about 60
		 * minutes.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		/* Re-uses the stream-reset timer object. */
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU ususually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (inp == NULL)) {
			return;
		}
		if (net == NULL) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_EARLYFR:
		{
			unsigned int msec;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			if (net->flight_size > net->cwnd) {
				/* no need to start */
				return;
			}
			SCTP_STAT_INCR(sctps_earlyfrstart);
			if (net->lastsa == 0) {
				/* Hmm no rtt estimate yet? */
				msec = stcb->asoc.initial_rto >> 2;
			} else {
				msec = ((net->lastsa >> 2) + net->lastsv) >> 1;
			}
			/* Clamp to the configured floor. */
			if (msec < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
				msec = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
				if (msec < SCTP_MINFR_MSEC_FLOOR) {
					msec = SCTP_MINFR_MSEC_FLOOR;
				}
			}
			to_ticks = MSEC_TO_TICKS(msec);
			tmr = &net->fr_timer;
		}
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		/* Note: this type requires net to be NULL. */
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		return;
		break;
	};
	/* A zero duration or missing timer object is a programming error. */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __FUNCTION__, t_type, to_ticks, tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	/* Record what the timeout handler needs to find its context. */
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}
2274 
/*
 * Stop the SCTP timer identified by t_type.  Maps the type to the same
 * timer object sctp_timer_start() would use, refuses to stop a timer
 * that is currently armed for a *different* type (joint-use timers),
 * and records 'from' so a later inspection can tell who stopped it.
 * Callers that pass an stcb must hold its lock (asserted below).
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	/* Only the address work-queue timer may run without an endpoint. */
	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_EARLYFR:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->fr_timer;
		SCTP_STAT_INCR(sctps_earlyfrstop);
		break;
	case SCTP_TIMER_TYPE_ITERATOR:
		{
			/* For this type, "inp" is really an iterator. */
			struct sctp_iterator *it;

			it = (struct sctp_iterator *)inp;
			tmr = &it->tmr;
		}
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/* nothing needed but the endpoint here */
		tmr = &inp->sctp_ep.signature_change;
		/*
		 * We re-use the newcookie timer for the INP kill timer. We
		 * must assure that we do not kill it by accident.
		 */
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		/*
		 * Stop the asoc kill timer.
		 */
		if (stcb == NULL) {
			return;
		}
		/* Shares the stream-reset timer object. */
		tmr = &stcb->asoc.strreset_timer;
		break;

	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_chage
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		tmr = &inp->sctp_ep.signature_change;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		break;
	};
	if (tmr == NULL) {
		return;
	}
	if ((tmr->type != t_type) && tmr->type) {
		/*
		 * Ok we have a timer that is under joint use. Cookie timer
		 * per chance with the SEND timer. We therefore are NOT
		 * running the timer that the caller wants stopped.  So just
		 * return.
		 */
		return;
	}
	/* Keep the outstanding send-timer count consistent. */
	if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) {
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
	}
	/* Clearing self tells the timeout handler the timer was stopped. */
	tmr->self = NULL;
	tmr->stopped_from = from;
	(void)SCTP_OS_TIMER_STOP(&tmr->timer);
	return;
}
2446 
2447 uint32_t
2448 sctp_calculate_len(struct mbuf *m)
2449 {
2450 	uint32_t tlen = 0;
2451 	struct mbuf *at;
2452 
2453 	at = m;
2454 	while (at) {
2455 		tlen += SCTP_BUF_LEN(at);
2456 		at = SCTP_BUF_NEXT(at);
2457 	}
2458 	return (tlen);
2459 }
2460 
2461 void
2462 sctp_mtu_size_reset(struct sctp_inpcb *inp,
2463     struct sctp_association *asoc, uint32_t mtu)
2464 {
2465 	/*
2466 	 * Reset the P-MTU size on this association, this involves changing
2467 	 * the asoc MTU, going through ANY chunk+overhead larger than mtu to
2468 	 * allow the DF flag to be cleared.
2469 	 */
2470 	struct sctp_tmit_chunk *chk;
2471 	unsigned int eff_mtu, ovh;
2472 
2473 #ifdef SCTP_PRINT_FOR_B_AND_M
2474 	SCTP_PRINTF("sctp_mtu_size_reset(%p, asoc:%p mtu:%d\n",
2475 	    inp, asoc, mtu);
2476 #endif
2477 	asoc->smallest_mtu = mtu;
2478 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2479 		ovh = SCTP_MIN_OVERHEAD;
2480 	} else {
2481 		ovh = SCTP_MIN_V4_OVERHEAD;
2482 	}
2483 	eff_mtu = mtu - ovh;
2484 	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
2485 
2486 		if (chk->send_size > eff_mtu) {
2487 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2488 		}
2489 	}
2490 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
2491 		if (chk->send_size > eff_mtu) {
2492 			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
2493 		}
2494 	}
2495 }
2496 
2497 
2498 /*
2499  * given an association and starting time of the current RTT period return
2500  * RTO in number of msecs net should point to the current network
2501  */
/*
 * Compute a new RTO (in ms) for 'net' from the elapsed time since
 * 'told', the start of the current RTT measurement period.  Updates the
 * smoothed RTT (lastsa) and variance (lastsv) using Van Jacobson's
 * integer algorithm, then bounds the result by the association's
 * minrto/maxrto per RFC 4960 section 6.3.1 (rules C6/C7).
 */
uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    struct sctp_nets *net,
    struct timeval *told,
    int safe)
{
	/*-
	 * given an association and the starting time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int calc_time = 0;
	int o_calctime;
	uint32_t new_rto = 0;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 */
	if (safe == sctp_align_unsafe_makecopy) {
		/* 'told' may be misaligned; work on an aligned copy. */
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* compute the RTT value (now - old) in whole milliseconds */
	if ((u_long)now.tv_sec > (u_long)old->tv_sec) {
		calc_time = ((u_long)now.tv_sec - (u_long)old->tv_sec) * 1000;
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time += (((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000);
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* Borrow 1,000ms from current calculation */
			calc_time -= 1000;
			/* Add in the slop over */
			calc_time += ((int)now.tv_usec / 1000);
			/* Add in the pre-second ms's */
			calc_time += (((int)1000000 - (int)old->tv_usec) / 1000);
		}
	} else if ((u_long)now.tv_sec == (u_long)old->tv_sec) {
		if ((u_long)now.tv_usec > (u_long)old->tv_usec) {
			calc_time = ((u_long)now.tv_usec -
			    (u_long)old->tv_usec) / 1000;
		} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
			/*
			 * We have to have 1 usec :-D this must be the
			 * loopback.
			 */
			calc_time = 1;
		} else {
			/* impossible .. garbage in nothing out */
			goto calc_rto;
		}
	} else {
		/* Clock wrapped? */
		goto calc_rto;
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	net->rtt = o_calctime = calc_time;
	/* this is Van Jacobson's integer version */
	if (net->RTO_measured) {
		calc_time -= (net->lastsa >> SCTP_RTT_SHIFT);	/* take away 1/8th when
								 * shift=3 */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
		net->prev_rtt = o_calctime;
		net->lastsa += calc_time;	/* add 7/8th into sa when
						 * shift=3 */
		/* calc_time now holds the error term; fold |error| into sv */
		if (calc_time < 0) {
			calc_time = -calc_time;
		}
		calc_time -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);	/* take away 1/4 when
									 * VAR shift=2 */
		net->lastsv += calc_time;
		if (net->lastsv == 0) {
			/* never let the variance collapse to zero */
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
	} else {
		/* First RTO measurment */
		net->RTO_measured = 1;
		net->lastsa = calc_time << SCTP_RTT_SHIFT;	/* Multiply by 8 when
								 * shift=3 */
		net->lastsv = calc_time;
		if (net->lastsv == 0) {
			net->lastsv = SCTP_CLOCK_GRANULARITY;
		}
		first_measure = 1;
		net->prev_rtt = o_calctime;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
calc_rto:
	/* RTO = SRTT + 4 * RTTVAR (via the shift encoding above) */
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		/* large RTO: assume a satellite-like network */
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}
2627 
2628 /*
2629  * return a pointer to a contiguous piece of data from the given mbuf chain
2630  * starting at 'off' for 'len' bytes.  If the desired piece spans more than
2631  * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
2632  * is >= 'len' returns NULL if there there isn't 'len' bytes in the chain.
2633  */
2634 caddr_t
2635 sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr)
2636 {
2637 	uint32_t count;
2638 	uint8_t *ptr;
2639 
2640 	ptr = in_ptr;
2641 	if ((off < 0) || (len <= 0))
2642 		return (NULL);
2643 
2644 	/* find the desired start location */
2645 	while ((m != NULL) && (off > 0)) {
2646 		if (off < SCTP_BUF_LEN(m))
2647 			break;
2648 		off -= SCTP_BUF_LEN(m);
2649 		m = SCTP_BUF_NEXT(m);
2650 	}
2651 	if (m == NULL)
2652 		return (NULL);
2653 
2654 	/* is the current mbuf large enough (eg. contiguous)? */
2655 	if ((SCTP_BUF_LEN(m) - off) >= len) {
2656 		return (mtod(m, caddr_t)+off);
2657 	} else {
2658 		/* else, it spans more than one mbuf, so save a temp copy... */
2659 		while ((m != NULL) && (len > 0)) {
2660 			count = min(SCTP_BUF_LEN(m) - off, len);
2661 			bcopy(mtod(m, caddr_t)+off, ptr, count);
2662 			len -= count;
2663 			ptr += count;
2664 			off = 0;
2665 			m = SCTP_BUF_NEXT(m);
2666 		}
2667 		if ((m == NULL) && (len > 0))
2668 			return (NULL);
2669 		else
2670 			return ((caddr_t)in_ptr);
2671 	}
2672 }
2673 
2674 
2675 
2676 struct sctp_paramhdr *
2677 sctp_get_next_param(struct mbuf *m,
2678     int offset,
2679     struct sctp_paramhdr *pull,
2680     int pull_limit)
2681 {
2682 	/* This just provides a typed signature to Peter's Pull routine */
2683 	return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit,
2684 	    (uint8_t *) pull));
2685 }
2686 
2687 
2688 int
2689 sctp_add_pad_tombuf(struct mbuf *m, int padlen)
2690 {
2691 	/*
2692 	 * add padlen bytes of 0 filled padding to the end of the mbuf. If
2693 	 * padlen is > 3 this routine will fail.
2694 	 */
2695 	uint8_t *dp;
2696 	int i;
2697 
2698 	if (padlen > 3) {
2699 		SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
2700 		return (ENOBUFS);
2701 	}
2702 	if (padlen <= M_TRAILINGSPACE(m)) {
2703 		/*
2704 		 * The easy way. We hope the majority of the time we hit
2705 		 * here :)
2706 		 */
2707 		dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m));
2708 		SCTP_BUF_LEN(m) += padlen;
2709 	} else {
2710 		/* Hard way we must grow the mbuf */
2711 		struct mbuf *tmp;
2712 
2713 		tmp = sctp_get_mbuf_for_msg(padlen, 0, M_DONTWAIT, 1, MT_DATA);
2714 		if (tmp == NULL) {
2715 			/* Out of space GAK! we are in big trouble. */
2716 			SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
2717 			return (ENOSPC);
2718 		}
2719 		/* setup and insert in middle */
2720 		SCTP_BUF_LEN(tmp) = padlen;
2721 		SCTP_BUF_NEXT(tmp) = NULL;
2722 		SCTP_BUF_NEXT(m) = tmp;
2723 		dp = mtod(tmp, uint8_t *);
2724 	}
2725 	/* zero out the pad */
2726 	for (i = 0; i < padlen; i++) {
2727 		*dp = 0;
2728 		dp++;
2729 	}
2730 	return (0);
2731 }
2732 
2733 int
2734 sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf)
2735 {
2736 	/* find the last mbuf in chain and pad it */
2737 	struct mbuf *m_at;
2738 
2739 	m_at = m;
2740 	if (last_mbuf) {
2741 		return (sctp_add_pad_tombuf(last_mbuf, padval));
2742 	} else {
2743 		while (m_at) {
2744 			if (SCTP_BUF_NEXT(m_at) == NULL) {
2745 				return (sctp_add_pad_tombuf(m_at, padval));
2746 			}
2747 			m_at = SCTP_BUF_NEXT(m_at);
2748 		}
2749 	}
2750 	SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
2751 	return (EFAULT);
2752 }
2753 
2754 int sctp_asoc_change_wake = 0;
2755 
/*
 * Deliver an SCTP_ASSOC_CHANGE notification for 'event' to the socket.
 * For TCP-model (and connected one-to-many) sockets, COMM_LOST /
 * CANT_STR_ASSOC additionally set so_error (ECONNREFUSED while still in
 * COOKIE_WAIT, ECONNRESET otherwise) and wake any sleepers.  The
 * notification itself is only queued if the application enabled
 * SCTP_PCB_FLAGS_RECVASSOCEVNT.
 */
static void
sctp_notify_assoc_change(uint32_t event, struct sctp_tcb *stcb,
    uint32_t error, void *data, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_assoc_change *sac;
	struct sctp_queued_to_read *control;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an ABORT comes in.
	 */
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((event == SCTP_COMM_LOST) || (event == SCTP_CANT_STR_ASSOC))) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
			/* Association never came up: connection refused. */
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
			stcb->sctp_socket->so_error = ECONNREFUSED;
		} else {
			SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
			stcb->sctp_socket->so_error = ECONNRESET;
		}
		/* Wake ANY sleepers */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/*
			 * Lock-order dance: hold a ref, drop the TCB lock,
			 * take the socket lock, then re-take the TCB lock.
			 */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* Socket closed while we were unlocked. */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sorwakeup(stcb->sctp_socket);
		sowwakeup(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
		sctp_asoc_change_wake++;
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVASSOCEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;

	/* Build the sctp_assoc_change notification in the mbuf. */
	sac = mtod(m_notify, struct sctp_assoc_change *);
	sac->sac_type = SCTP_ASSOC_CHANGE;
	sac->sac_flags = 0;
	sac->sac_length = sizeof(struct sctp_assoc_change);
	sac->sac_state = event;
	sac->sac_error = error;
	/* XXX verify these stream counts */
	sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
	sac->sac_inbound_streams = stcb->asoc.streamincnt;
	sac->sac_assoc_id = sctp_get_associd(stcb);
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_change);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
	if (event == SCTP_COMM_LOST) {
		/* Wake up any sleeper */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* Same ref/unlock/lock dance as above. */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}
2873 
/*
 * Deliver an SCTP_PEER_ADDR_CHANGE notification carrying the peer
 * address 'sa', the new 'state', and 'error' to the socket's read
 * queue.  Only queued when the application enabled
 * SCTP_PCB_FLAGS_RECVPADDREVNT.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	switch (sa->sa_family) {
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			/*
			 * Normalize the scope id of link-local addresses
			 * before handing the address to userland.
			 */
			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
					in6_clearscope(&sin6->sin6_addr);
				}
			}
			break;
		}
#endif
	default:
		/* TSNH */
		break;
	}
	spc->spc_state = state;
	spc->spc_error = error;
	spc->spc_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    SCTP_SO_NOT_LOCKED);
}
2948 
2949 
/*
 * Deliver an SCTP_SEND_FAILED notification for chunk 'chk' to the
 * socket.  The chunk's data mbufs (minus the SCTP data-chunk header)
 * are stolen from 'chk' and chained onto the notification so the
 * application can recover the payload.  Only queued when the
 * application enabled SCTP_PCB_FLAGS_RECVSENDFAILEVNT and the socket
 * buffer has room.
 */
static void
sctp_notify_send_failed(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_tmit_chunk *chk, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	/* ssf_length covers the header plus the payload (sans chunk hdr). */
	length = sizeof(struct sctp_send_failed) + chk->send_size;
	length -= sizeof(struct sctp_data_chunk);
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number;
	ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq;
	ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags;
	ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype;
	ssf->ssf_info.sinfo_context = chk->rec.data.context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);

	if (chk->data) {
		/*
		 * trim off the sctp chunk header(it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}
3031 
3032 
/*
 * Deliver an SCTP_SEND_FAILED notification for a message that was still
 * sitting on a stream output queue (never fully chunked for transmission).
 * 'error' selects the SCTP_DATA_UNSENT vs SCTP_DATA_SENT flag.  On the
 * success path the pending user data (sp->data) is handed over to the
 * notification mbuf chain and sp->data is cleared.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_queued_to_read *control;
	int length;

	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSENDFAILEVNT)) {
		/* event not enabled */
		return;
	}
	/* reported length covers the notification header plus queued data */
	length = sizeof(struct sctp_send_failed) + sp->length;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_send_failed), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	ssf = mtod(m_notify, struct sctp_send_failed *);
	ssf->ssf_type = SCTP_SEND_FAILED;
	if (error == SCTP_NOTIFY_DATAGRAM_UNSENT)
		ssf->ssf_flags = SCTP_DATA_UNSENT;
	else
		ssf->ssf_flags = SCTP_DATA_SENT;
	ssf->ssf_length = length;
	ssf->ssf_error = error;
	/* not exactly what the user sent in, but should be close :) */
	bzero(&ssf->ssf_info, sizeof(ssf->ssf_info));
	ssf->ssf_info.sinfo_stream = sp->stream;
	ssf->ssf_info.sinfo_ssn = sp->strseq;
	if (sp->some_taken) {
		/* part of the message was already moved into a chunk */
		ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
	} else {
		ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
	}
	ssf->ssf_info.sinfo_ppid = sp->ppid;
	ssf->ssf_info.sinfo_context = sp->context;
	ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
	ssf->ssf_assoc_id = sctp_get_associd(stcb);
	/* chain the pending user data behind the notification header */
	SCTP_BUF_NEXT(m_notify) = sp->data;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);

	/* Steal off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* dropping also frees the stolen user data chained above */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}
3105 
3106 
3107 
3108 static void
3109 sctp_notify_adaptation_layer(struct sctp_tcb *stcb,
3110     uint32_t error)
3111 {
3112 	struct mbuf *m_notify;
3113 	struct sctp_adaptation_event *sai;
3114 	struct sctp_queued_to_read *control;
3115 
3116 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
3117 		/* event not enabled */
3118 		return;
3119 	}
3120 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_DONTWAIT, 1, MT_DATA);
3121 	if (m_notify == NULL)
3122 		/* no space left */
3123 		return;
3124 	SCTP_BUF_LEN(m_notify) = 0;
3125 	sai = mtod(m_notify, struct sctp_adaptation_event *);
3126 	sai->sai_type = SCTP_ADAPTATION_INDICATION;
3127 	sai->sai_flags = 0;
3128 	sai->sai_length = sizeof(struct sctp_adaptation_event);
3129 	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
3130 	sai->sai_assoc_id = sctp_get_associd(stcb);
3131 
3132 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
3133 	SCTP_BUF_NEXT(m_notify) = NULL;
3134 
3135 	/* append to socket */
3136 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3137 	    0, 0, 0, 0, 0, 0,
3138 	    m_notify);
3139 	if (control == NULL) {
3140 		/* no memory */
3141 		sctp_m_freem(m_notify);
3142 		return;
3143 	}
3144 	control->length = SCTP_BUF_LEN(m_notify);
3145 	control->spec_flags = M_NOTIFICATION;
3146 	/* not that we need this */
3147 	control->tail_mbuf = m_notify;
3148 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3149 	    control,
3150 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3151 }
3152 
3153 /* This always must be called with the read-queue LOCKED in the INP */
3154 static void
3155 sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
3156     uint32_t val, int so_locked
3157 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3158     SCTP_UNUSED
3159 #endif
3160 )
3161 {
3162 	struct mbuf *m_notify;
3163 	struct sctp_pdapi_event *pdapi;
3164 	struct sctp_queued_to_read *control;
3165 	struct sockbuf *sb;
3166 
3167 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3168 		/* event not enabled */
3169 		return;
3170 	}
3171 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_DONTWAIT, 1, MT_DATA);
3172 	if (m_notify == NULL)
3173 		/* no space left */
3174 		return;
3175 	SCTP_BUF_LEN(m_notify) = 0;
3176 	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
3177 	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
3178 	pdapi->pdapi_flags = 0;
3179 	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
3180 	pdapi->pdapi_indication = error;
3181 	pdapi->pdapi_stream = (val >> 16);
3182 	pdapi->pdapi_seq = (val & 0x0000ffff);
3183 	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);
3184 
3185 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
3186 	SCTP_BUF_NEXT(m_notify) = NULL;
3187 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3188 	    0, 0, 0, 0, 0, 0,
3189 	    m_notify);
3190 	if (control == NULL) {
3191 		/* no memory */
3192 		sctp_m_freem(m_notify);
3193 		return;
3194 	}
3195 	control->spec_flags = M_NOTIFICATION;
3196 	control->length = SCTP_BUF_LEN(m_notify);
3197 	/* not that we need this */
3198 	control->tail_mbuf = m_notify;
3199 	control->held_length = 0;
3200 	control->length = 0;
3201 	sb = &stcb->sctp_socket->so_rcv;
3202 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3203 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
3204 	}
3205 	sctp_sballoc(stcb, sb, m_notify);
3206 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
3207 		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
3208 	}
3209 	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
3210 	control->end_added = 1;
3211 	if (stcb->asoc.control_pdapi)
3212 		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
3213 	else {
3214 		/* we really should not see this case */
3215 		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
3216 	}
3217 	if (stcb->sctp_ep && stcb->sctp_socket) {
3218 		/* This should always be the case */
3219 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3220 		struct socket *so;
3221 
3222 		so = SCTP_INP_SO(stcb->sctp_ep);
3223 		if (!so_locked) {
3224 			atomic_add_int(&stcb->asoc.refcnt, 1);
3225 			SCTP_TCB_UNLOCK(stcb);
3226 			SCTP_SOCKET_LOCK(so, 1);
3227 			SCTP_TCB_LOCK(stcb);
3228 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
3229 			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3230 				SCTP_SOCKET_UNLOCK(so, 1);
3231 				return;
3232 			}
3233 		}
3234 #endif
3235 		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
3236 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3237 		if (!so_locked) {
3238 			SCTP_SOCKET_UNLOCK(so, 1);
3239 		}
3240 #endif
3241 	}
3242 }
3243 
/*
 * Notify the ULP that the peer has initiated a SHUTDOWN.  For one-to-one
 * style sockets (and one-to-many sockets in the TCP pool) the socket is
 * first marked unable to send; then, if the application subscribed, an
 * SCTP_SHUTDOWN_EVENT is queued on the read queue.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		/* drop the TCB lock before taking the socket lock (ordering) */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* the assoc was closed while the lock was dropped */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_DONTWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, 0, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}
3311 
3312 static void
3313 sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
3314     int so_locked
3315 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3316     SCTP_UNUSED
3317 #endif
3318 )
3319 {
3320 	struct mbuf *m_notify;
3321 	struct sctp_sender_dry_event *event;
3322 	struct sctp_queued_to_read *control;
3323 
3324 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_DRYEVNT)) {
3325 		/* event not enabled */
3326 		return;
3327 	}
3328 	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_DONTWAIT, 1, MT_DATA);
3329 	if (m_notify == NULL) {
3330 		/* no space left */
3331 		return;
3332 	}
3333 	SCTP_BUF_LEN(m_notify) = 0;
3334 	event = mtod(m_notify, struct sctp_sender_dry_event *);
3335 	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
3336 	event->sender_dry_flags = 0;
3337 	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
3338 	event->sender_dry_assoc_id = sctp_get_associd(stcb);
3339 
3340 	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
3341 	SCTP_BUF_NEXT(m_notify) = NULL;
3342 
3343 	/* append to socket */
3344 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3345 	    0, 0, 0, 0, 0, 0, m_notify);
3346 	if (control == NULL) {
3347 		/* no memory */
3348 		sctp_m_freem(m_notify);
3349 		return;
3350 	}
3351 	control->length = SCTP_BUF_LEN(m_notify);
3352 	control->spec_flags = M_NOTIFICATION;
3353 	/* not that we need this */
3354 	control->tail_mbuf = m_notify;
3355 	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
3356 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
3357 }
3358 
3359 
3360 static void
3361 sctp_notify_stream_reset_add(struct sctp_tcb *stcb, int number_entries, int flag)
3362 {
3363 	struct mbuf *m_notify;
3364 	struct sctp_queued_to_read *control;
3365 	struct sctp_stream_reset_event *strreset;
3366 	int len;
3367 
3368 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3369 		/* event not enabled */
3370 		return;
3371 	}
3372 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3373 	if (m_notify == NULL)
3374 		/* no space left */
3375 		return;
3376 	SCTP_BUF_LEN(m_notify) = 0;
3377 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3378 	if (len > M_TRAILINGSPACE(m_notify)) {
3379 		/* never enough room */
3380 		sctp_m_freem(m_notify);
3381 		return;
3382 	}
3383 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3384 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3385 	strreset->strreset_flags = SCTP_STRRESET_ADD_STREAM | flag;
3386 	strreset->strreset_length = len;
3387 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3388 	strreset->strreset_list[0] = number_entries;
3389 
3390 	SCTP_BUF_LEN(m_notify) = len;
3391 	SCTP_BUF_NEXT(m_notify) = NULL;
3392 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3393 		/* no space */
3394 		sctp_m_freem(m_notify);
3395 		return;
3396 	}
3397 	/* append to socket */
3398 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3399 	    0, 0, 0, 0, 0, 0,
3400 	    m_notify);
3401 	if (control == NULL) {
3402 		/* no memory */
3403 		sctp_m_freem(m_notify);
3404 		return;
3405 	}
3406 	control->spec_flags = M_NOTIFICATION;
3407 	control->length = SCTP_BUF_LEN(m_notify);
3408 	/* not that we need this */
3409 	control->tail_mbuf = m_notify;
3410 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3411 	    control,
3412 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3413 }
3414 
3415 
3416 static void
3417 sctp_notify_stream_reset(struct sctp_tcb *stcb,
3418     int number_entries, uint16_t * list, int flag)
3419 {
3420 	struct mbuf *m_notify;
3421 	struct sctp_queued_to_read *control;
3422 	struct sctp_stream_reset_event *strreset;
3423 	int len;
3424 
3425 	if (sctp_is_feature_off(stcb->sctp_ep, SCTP_PCB_FLAGS_STREAM_RESETEVNT)) {
3426 		/* event not enabled */
3427 		return;
3428 	}
3429 	m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
3430 	if (m_notify == NULL)
3431 		/* no space left */
3432 		return;
3433 	SCTP_BUF_LEN(m_notify) = 0;
3434 	len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t));
3435 	if (len > M_TRAILINGSPACE(m_notify)) {
3436 		/* never enough room */
3437 		sctp_m_freem(m_notify);
3438 		return;
3439 	}
3440 	strreset = mtod(m_notify, struct sctp_stream_reset_event *);
3441 	strreset->strreset_type = SCTP_STREAM_RESET_EVENT;
3442 	if (number_entries == 0) {
3443 		strreset->strreset_flags = flag | SCTP_STRRESET_ALL_STREAMS;
3444 	} else {
3445 		strreset->strreset_flags = flag | SCTP_STRRESET_STREAM_LIST;
3446 	}
3447 	strreset->strreset_length = len;
3448 	strreset->strreset_assoc_id = sctp_get_associd(stcb);
3449 	if (number_entries) {
3450 		int i;
3451 
3452 		for (i = 0; i < number_entries; i++) {
3453 			strreset->strreset_list[i] = ntohs(list[i]);
3454 		}
3455 	}
3456 	SCTP_BUF_LEN(m_notify) = len;
3457 	SCTP_BUF_NEXT(m_notify) = NULL;
3458 	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
3459 		/* no space */
3460 		sctp_m_freem(m_notify);
3461 		return;
3462 	}
3463 	/* append to socket */
3464 	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
3465 	    0, 0, 0, 0, 0, 0,
3466 	    m_notify);
3467 	if (control == NULL) {
3468 		/* no memory */
3469 		sctp_m_freem(m_notify);
3470 		return;
3471 	}
3472 	control->spec_flags = M_NOTIFICATION;
3473 	control->length = SCTP_BUF_LEN(m_notify);
3474 	/* not that we need this */
3475 	control->tail_mbuf = m_notify;
3476 	sctp_add_to_readq(stcb->sctp_ep, stcb,
3477 	    control,
3478 	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
3479 }
3480 
3481 
3482 void
3483 sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb,
3484     uint32_t error, void *data, int so_locked
3485 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3486     SCTP_UNUSED
3487 #endif
3488 )
3489 {
3490 	if ((stcb == NULL) ||
3491 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3492 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3493 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3494 		/* If the socket is gone we are out of here */
3495 		return;
3496 	}
3497 	if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) {
3498 		return;
3499 	}
3500 	if (stcb && ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) ||
3501 	    (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED))) {
3502 		if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) ||
3503 		    (notification == SCTP_NOTIFY_INTERFACE_UP) ||
3504 		    (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) {
3505 			/* Don't report these in front states */
3506 			return;
3507 		}
3508 	}
3509 	switch (notification) {
3510 	case SCTP_NOTIFY_ASSOC_UP:
3511 		if (stcb->asoc.assoc_up_sent == 0) {
3512 			sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, so_locked);
3513 			stcb->asoc.assoc_up_sent = 1;
3514 		}
3515 		if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) {
3516 			sctp_notify_adaptation_layer(stcb, error);
3517 		}
3518 		if (stcb->asoc.peer_supports_auth == 0) {
3519 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3520 			    NULL, so_locked);
3521 		}
3522 		break;
3523 	case SCTP_NOTIFY_ASSOC_DOWN:
3524 		sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, so_locked);
3525 		break;
3526 	case SCTP_NOTIFY_INTERFACE_DOWN:
3527 		{
3528 			struct sctp_nets *net;
3529 
3530 			net = (struct sctp_nets *)data;
3531 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE,
3532 			    (struct sockaddr *)&net->ro._l_addr, error);
3533 			break;
3534 		}
3535 	case SCTP_NOTIFY_INTERFACE_UP:
3536 		{
3537 			struct sctp_nets *net;
3538 
3539 			net = (struct sctp_nets *)data;
3540 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE,
3541 			    (struct sockaddr *)&net->ro._l_addr, error);
3542 			break;
3543 		}
3544 	case SCTP_NOTIFY_INTERFACE_CONFIRMED:
3545 		{
3546 			struct sctp_nets *net;
3547 
3548 			net = (struct sctp_nets *)data;
3549 			sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED,
3550 			    (struct sockaddr *)&net->ro._l_addr, error);
3551 			break;
3552 		}
3553 	case SCTP_NOTIFY_SPECIAL_SP_FAIL:
3554 		sctp_notify_send_failed2(stcb, error,
3555 		    (struct sctp_stream_queue_pending *)data, so_locked);
3556 		break;
3557 	case SCTP_NOTIFY_DG_FAIL:
3558 		sctp_notify_send_failed(stcb, error,
3559 		    (struct sctp_tmit_chunk *)data, so_locked);
3560 		break;
3561 	case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION:
3562 		{
3563 			uint32_t val;
3564 
3565 			val = *((uint32_t *) data);
3566 
3567 			sctp_notify_partial_delivery_indication(stcb, error, val, so_locked);
3568 			break;
3569 		}
3570 	case SCTP_NOTIFY_STRDATA_ERR:
3571 		break;
3572 	case SCTP_NOTIFY_ASSOC_ABORTED:
3573 		if ((stcb) && (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) ||
3574 		    ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED))) {
3575 			sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, NULL, so_locked);
3576 		} else {
3577 			sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, NULL, so_locked);
3578 		}
3579 		break;
3580 	case SCTP_NOTIFY_PEER_OPENED_STREAM:
3581 		break;
3582 	case SCTP_NOTIFY_STREAM_OPENED_OK:
3583 		break;
3584 	case SCTP_NOTIFY_ASSOC_RESTART:
3585 		sctp_notify_assoc_change(SCTP_RESTART, stcb, error, data, so_locked);
3586 		if (stcb->asoc.peer_supports_auth == 0) {
3587 			sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0,
3588 			    NULL, so_locked);
3589 		}
3590 		break;
3591 	case SCTP_NOTIFY_HB_RESP:
3592 		break;
3593 	case SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK:
3594 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_INBOUND_STR);
3595 		break;
3596 	case SCTP_NOTIFY_STR_RESET_ADD_OK:
3597 		sctp_notify_stream_reset_add(stcb, error, SCTP_STRRESET_OUTBOUND_STR);
3598 		break;
3599 	case SCTP_NOTIFY_STR_RESET_ADD_FAIL:
3600 		sctp_notify_stream_reset_add(stcb, error, (SCTP_STRRESET_FAILED | SCTP_STRRESET_OUTBOUND_STR));
3601 		break;
3602 
3603 	case SCTP_NOTIFY_STR_RESET_SEND:
3604 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_OUTBOUND_STR);
3605 		break;
3606 	case SCTP_NOTIFY_STR_RESET_RECV:
3607 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STRRESET_INBOUND_STR);
3608 		break;
3609 	case SCTP_NOTIFY_STR_RESET_FAILED_OUT:
3610 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_OUTBOUND_STR | SCTP_STRRESET_FAILED));
3611 		break;
3612 	case SCTP_NOTIFY_STR_RESET_FAILED_IN:
3613 		sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), (SCTP_STRRESET_INBOUND_STR | SCTP_STRRESET_FAILED));
3614 		break;
3615 	case SCTP_NOTIFY_ASCONF_ADD_IP:
3616 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data,
3617 		    error);
3618 		break;
3619 	case SCTP_NOTIFY_ASCONF_DELETE_IP:
3620 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data,
3621 		    error);
3622 		break;
3623 	case SCTP_NOTIFY_ASCONF_SET_PRIMARY:
3624 		sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data,
3625 		    error);
3626 		break;
3627 	case SCTP_NOTIFY_ASCONF_SUCCESS:
3628 		break;
3629 	case SCTP_NOTIFY_ASCONF_FAILED:
3630 		break;
3631 	case SCTP_NOTIFY_PEER_SHUTDOWN:
3632 		sctp_notify_shutdown_event(stcb);
3633 		break;
3634 	case SCTP_NOTIFY_AUTH_NEW_KEY:
3635 		sctp_notify_authentication(stcb, SCTP_AUTH_NEWKEY, error,
3636 		    (uint16_t) (uintptr_t) data,
3637 		    so_locked);
3638 		break;
3639 	case SCTP_NOTIFY_AUTH_FREE_KEY:
3640 		sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error,
3641 		    (uint16_t) (uintptr_t) data,
3642 		    so_locked);
3643 		break;
3644 	case SCTP_NOTIFY_NO_PEER_AUTH:
3645 		sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error,
3646 		    (uint16_t) (uintptr_t) data,
3647 		    so_locked);
3648 		break;
3649 	case SCTP_NOTIFY_SENDER_DRY:
3650 		sctp_notify_sender_dry_event(stcb, so_locked);
3651 		break;
3652 	default:
3653 		SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n",
3654 		    __FUNCTION__, notification, notification);
3655 		break;
3656 	}			/* end switch */
3657 }
3658 
3659 void
3660 sctp_report_all_outbound(struct sctp_tcb *stcb, int holds_lock, int so_locked
3661 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3662     SCTP_UNUSED
3663 #endif
3664 )
3665 {
3666 	struct sctp_association *asoc;
3667 	struct sctp_stream_out *outs;
3668 	struct sctp_tmit_chunk *chk;
3669 	struct sctp_stream_queue_pending *sp;
3670 	int i;
3671 
3672 	asoc = &stcb->asoc;
3673 
3674 	if (stcb == NULL) {
3675 		return;
3676 	}
3677 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3678 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3679 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3680 		return;
3681 	}
3682 	/* now through all the gunk freeing chunks */
3683 	if (holds_lock == 0) {
3684 		SCTP_TCB_SEND_LOCK(stcb);
3685 	}
3686 	/* sent queue SHOULD be empty */
3687 	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3688 		chk = TAILQ_FIRST(&asoc->sent_queue);
3689 		while (chk) {
3690 			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3691 			asoc->sent_queue_cnt--;
3692 			if (chk->data != NULL) {
3693 				sctp_free_bufspace(stcb, asoc, chk, 1);
3694 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3695 				    SCTP_NOTIFY_DATAGRAM_SENT, chk, so_locked);
3696 				if (chk->data) {
3697 					sctp_m_freem(chk->data);
3698 					chk->data = NULL;
3699 				}
3700 			}
3701 			sctp_free_a_chunk(stcb, chk);
3702 			/* sa_ignore FREED_MEMORY */
3703 			chk = TAILQ_FIRST(&asoc->sent_queue);
3704 		}
3705 	}
3706 	/* pending send queue SHOULD be empty */
3707 	if (!TAILQ_EMPTY(&asoc->send_queue)) {
3708 		chk = TAILQ_FIRST(&asoc->send_queue);
3709 		while (chk) {
3710 			TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3711 			asoc->send_queue_cnt--;
3712 			if (chk->data != NULL) {
3713 				sctp_free_bufspace(stcb, asoc, chk, 1);
3714 				sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb,
3715 				    SCTP_NOTIFY_DATAGRAM_UNSENT, chk, so_locked);
3716 				if (chk->data) {
3717 					sctp_m_freem(chk->data);
3718 					chk->data = NULL;
3719 				}
3720 			}
3721 			sctp_free_a_chunk(stcb, chk);
3722 			/* sa_ignore FREED_MEMORY */
3723 			chk = TAILQ_FIRST(&asoc->send_queue);
3724 		}
3725 	}
3726 	for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3727 		/* For each stream */
3728 		outs = &stcb->asoc.strmout[i];
3729 		/* clean up any sends there */
3730 		stcb->asoc.locked_on_sending = NULL;
3731 		sp = TAILQ_FIRST(&outs->outqueue);
3732 		while (sp) {
3733 			stcb->asoc.stream_queue_cnt--;
3734 			TAILQ_REMOVE(&outs->outqueue, sp, next);
3735 			sctp_free_spbufspace(stcb, asoc, sp);
3736 			sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb,
3737 			    SCTP_NOTIFY_DATAGRAM_UNSENT, (void *)sp, so_locked);
3738 			if (sp->data) {
3739 				sctp_m_freem(sp->data);
3740 				sp->data = NULL;
3741 			}
3742 			if (sp->net)
3743 				sctp_free_remote_addr(sp->net);
3744 			sp->net = NULL;
3745 			/* Free the chunk */
3746 			sctp_free_a_strmoq(stcb, sp);
3747 			/* sa_ignore FREED_MEMORY */
3748 			sp = TAILQ_FIRST(&outs->outqueue);
3749 		}
3750 	}
3751 
3752 	if (holds_lock == 0) {
3753 		SCTP_TCB_SEND_UNLOCK(stcb);
3754 	}
3755 }
3756 
3757 void
3758 sctp_abort_notification(struct sctp_tcb *stcb, int error, int so_locked
3759 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3760     SCTP_UNUSED
3761 #endif
3762 )
3763 {
3764 
3765 	if (stcb == NULL) {
3766 		return;
3767 	}
3768 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
3769 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3770 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
3771 		return;
3772 	}
3773 	/* Tell them we lost the asoc */
3774 	sctp_report_all_outbound(stcb, 1, so_locked);
3775 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) ||
3776 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3777 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) {
3778 		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED;
3779 	}
3780 	sctp_ulp_notify(SCTP_NOTIFY_ASSOC_ABORTED, stcb, error, NULL, so_locked);
3781 }
3782 
/*
 * Abort an association in response to an inbound packet: notify the ULP
 * (when a TCB exists), send an ABORT back to the peer using the packet's
 * addressing information, and free the TCB - or, with no TCB, finish
 * tearing down a dying inpcb that has no associations left.
 */
void
sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct mbuf *m, int iphlen, struct sctphdr *sh, struct mbuf *op_err,
    uint32_t vrf_id, uint16_t port)
{
	uint32_t vtag;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	vtag = 0;
	if (stcb != NULL) {
		/* We have a TCB to abort, send notification too */
		vtag = stcb->asoc.peer_vtag;
		sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
		/* get the assoc vrf id and table id */
		vrf_id = stcb->asoc.vrf_id;
		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	}
	sctp_send_abort(m, iphlen, sh, vtag, op_err, vrf_id, port);
	if (stcb != NULL) {
		/* Ok, now lets free it */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* lock ordering: drop the TCB lock before taking the socket lock */
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else {
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
				/* last association is gone; free the endpoint */
				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
				    SCTP_CALLED_DIRECTLY_NOCMPSET);
			}
		}
	}
}
3828 
3829 #ifdef SCTP_ASOCLOG_OF_TSNS
/*
 * Debug aid: dump the per-association inbound and outbound TSN track
 * logs.  The body is additionally gated on NOSIY_PRINTS.
 * NOTE(review): "NOSIY_PRINTS" looks like a typo for "NOISY_PRINTS" -
 * confirm which macro the build actually defines; as written the body
 * compiles away unless NOSIY_PRINTS is defined.
 */
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
	int i;

	SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
	SCTP_PRINTF("IN bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) {
		SCTP_PRINTF("None rcvd\n");
		goto none_in;
	}
	if (stcb->asoc.tsn_in_wrapped) {
		/* log wrapped: print the oldest entries first */
		for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_in_at) {
		/* then the newer entries from the start of the ring */
		for (i = 0; i < stcb->asoc.tsn_in_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.in_tsnlog[i].tsn,
			    stcb->asoc.in_tsnlog[i].strm,
			    stcb->asoc.in_tsnlog[i].seq,
			    stcb->asoc.in_tsnlog[i].flgs,
			    stcb->asoc.in_tsnlog[i].sz);
		}
	}
none_in:
	SCTP_PRINTF("OUT bound TSN log-aaa\n");
	if ((stcb->asoc.tsn_out_at == 0) &&
	    (stcb->asoc.tsn_out_wrapped == 0)) {
		SCTP_PRINTF("None sent\n");
	}
	if (stcb->asoc.tsn_out_wrapped) {
		/* log wrapped: print the oldest entries first */
		for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
	if (stcb->asoc.tsn_out_at) {
		/* then the newer entries from the start of the ring */
		for (i = 0; i < stcb->asoc.tsn_out_at; i++) {
			SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n",
			    stcb->asoc.out_tsnlog[i].tsn,
			    stcb->asoc.out_tsnlog[i].strm,
			    stcb->asoc.out_tsnlog[i].seq,
			    stcb->asoc.out_tsnlog[i].flgs,
			    stcb->asoc.out_tsnlog[i].sz);
		}
	}
#endif
}
3890 
3891 #endif
3892 
3893 void
3894 sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
3895     int error, struct mbuf *op_err,
3896     int so_locked
3897 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
3898     SCTP_UNUSED
3899 #endif
3900 )
3901 {
3902 	uint32_t vtag;
3903 
3904 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3905 	struct socket *so;
3906 
3907 #endif
3908 
3909 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3910 	so = SCTP_INP_SO(inp);
3911 #endif
3912 	if (stcb == NULL) {
3913 		/* Got to have a TCB */
3914 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3915 			if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3916 				sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3917 				    SCTP_CALLED_DIRECTLY_NOCMPSET);
3918 			}
3919 		}
3920 		return;
3921 	} else {
3922 		stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
3923 	}
3924 	vtag = stcb->asoc.peer_vtag;
3925 	/* notify the ulp */
3926 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0)
3927 		sctp_abort_notification(stcb, error, so_locked);
3928 	/* notify the peer */
3929 #if defined(SCTP_PANIC_ON_ABORT)
3930 	panic("aborting an association");
3931 #endif
3932 	sctp_send_abort_tcb(stcb, op_err, so_locked);
3933 	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
3934 	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
3935 	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3936 		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3937 	}
3938 	/* now free the asoc */
3939 #ifdef SCTP_ASOCLOG_OF_TSNS
3940 	sctp_print_out_track_log(stcb);
3941 #endif
3942 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3943 	if (!so_locked) {
3944 		atomic_add_int(&stcb->asoc.refcnt, 1);
3945 		SCTP_TCB_UNLOCK(stcb);
3946 		SCTP_SOCKET_LOCK(so, 1);
3947 		SCTP_TCB_LOCK(stcb);
3948 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3949 	}
3950 #endif
3951 	(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5);
3952 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3953 	if (!so_locked) {
3954 		SCTP_SOCKET_UNLOCK(so, 1);
3955 	}
3956 #endif
3957 }
3958 
3959 void
3960 sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
3961     struct sctp_inpcb *inp, struct mbuf *op_err, uint32_t vrf_id, uint16_t port)
3962 {
3963 	struct sctp_chunkhdr *ch, chunk_buf;
3964 	unsigned int chk_length;
3965 
3966 	SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue);
3967 	/* Generate a TO address for future reference */
3968 	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3969 		if (LIST_FIRST(&inp->sctp_asoc_list) == NULL) {
3970 			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
3971 			    SCTP_CALLED_DIRECTLY_NOCMPSET);
3972 		}
3973 	}
3974 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
3975 	    sizeof(*ch), (uint8_t *) & chunk_buf);
3976 	while (ch != NULL) {
3977 		chk_length = ntohs(ch->chunk_length);
3978 		if (chk_length < sizeof(*ch)) {
3979 			/* break to abort land */
3980 			break;
3981 		}
3982 		switch (ch->chunk_type) {
3983 		case SCTP_COOKIE_ECHO:
3984 			/* We hit here only if the assoc is being freed */
3985 			return;
3986 		case SCTP_PACKET_DROPPED:
3987 			/* we don't respond to pkt-dropped */
3988 			return;
3989 		case SCTP_ABORT_ASSOCIATION:
3990 			/* we don't respond with an ABORT to an ABORT */
3991 			return;
3992 		case SCTP_SHUTDOWN_COMPLETE:
3993 			/*
3994 			 * we ignore it since we are not waiting for it and
3995 			 * peer is gone
3996 			 */
3997 			return;
3998 		case SCTP_SHUTDOWN_ACK:
3999 			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port);
4000 			return;
4001 		default:
4002 			break;
4003 		}
4004 		offset += SCTP_SIZE32(chk_length);
4005 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4006 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4007 	}
4008 	sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port);
4009 }
4010 
4011 /*
4012  * check the inbound datagram to make sure there is not an abort inside it,
4013  * if there is return 1, else return 0.
4014  */
4015 int
4016 sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill)
4017 {
4018 	struct sctp_chunkhdr *ch;
4019 	struct sctp_init_chunk *init_chk, chunk_buf;
4020 	int offset;
4021 	unsigned int chk_length;
4022 
4023 	offset = iphlen + sizeof(struct sctphdr);
4024 	ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch),
4025 	    (uint8_t *) & chunk_buf);
4026 	while (ch != NULL) {
4027 		chk_length = ntohs(ch->chunk_length);
4028 		if (chk_length < sizeof(*ch)) {
4029 			/* packet is probably corrupt */
4030 			break;
4031 		}
4032 		/* we seem to be ok, is it an abort? */
4033 		if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) {
4034 			/* yep, tell them */
4035 			return (1);
4036 		}
4037 		if (ch->chunk_type == SCTP_INITIATION) {
4038 			/* need to update the Vtag */
4039 			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
4040 			    offset, sizeof(*init_chk), (uint8_t *) & chunk_buf);
4041 			if (init_chk != NULL) {
4042 				*vtagfill = ntohl(init_chk->init.initiate_tag);
4043 			}
4044 		}
4045 		/* Nope, move to the next chunk */
4046 		offset += SCTP_SIZE32(chk_length);
4047 		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset,
4048 		    sizeof(*ch), (uint8_t *) & chunk_buf);
4049 	}
4050 	return (0);
4051 }
4052 
4053 /*
4054  * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id
4055  * set (i.e. it's 0) so, create this function to compare link local scopes
4056  */
4057 #ifdef INET6
4058 uint32_t
4059 sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2)
4060 {
4061 	struct sockaddr_in6 a, b;
4062 
4063 	/* save copies */
4064 	a = *addr1;
4065 	b = *addr2;
4066 
4067 	if (a.sin6_scope_id == 0)
4068 		if (sa6_recoverscope(&a)) {
4069 			/* can't get scope, so can't match */
4070 			return (0);
4071 		}
4072 	if (b.sin6_scope_id == 0)
4073 		if (sa6_recoverscope(&b)) {
4074 			/* can't get scope, so can't match */
4075 			return (0);
4076 		}
4077 	if (a.sin6_scope_id != b.sin6_scope_id)
4078 		return (0);
4079 
4080 	return (1);
4081 }
4082 
4083 /*
4084  * returns a sockaddr_in6 with embedded scope recovered and removed
4085  */
/*
 * Return a sockaddr_in6 with its scope recovered or its embedded scope
 * removed.  Only link-local IPv6 addresses are touched.
 *
 * addr:  the address to examine.  NOTE(review): in the non-zero
 *        scope_id branch, in6_clearscope() modifies *addr in place --
 *        callers appear to rely on this; confirm before changing.
 * store: caller-supplied scratch space used when a scope has to be
 *        recovered, so the original addr is left untouched in that case.
 *
 * Returns either addr or store, whichever holds the usable address.
 */
struct sockaddr_in6 *
sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store)
{
	/* check and strip embedded scope junk */
	if (addr->sin6_family == AF_INET6) {
		if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) {
			if (addr->sin6_scope_id == 0) {
				/* no scope_id set: try to recover it into a copy */
				*store = *addr;
				if (!sa6_recoverscope(store)) {
					/* use the recovered scope */
					addr = store;
				}
			} else {
				/* else, return the original "to" addr */
				in6_clearscope(&addr->sin6_addr);
			}
		}
	}
	return (addr);
}
4106 
4107 #endif
4108 
4109 /*
4110  * are the two addresses the same?  currently a "scopeless" check returns: 1
4111  * if same, 0 if not
4112  */
4113 int
4114 sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2)
4115 {
4116 
4117 	/* must be valid */
4118 	if (sa1 == NULL || sa2 == NULL)
4119 		return (0);
4120 
4121 	/* must be the same family */
4122 	if (sa1->sa_family != sa2->sa_family)
4123 		return (0);
4124 
4125 	switch (sa1->sa_family) {
4126 #ifdef INET6
4127 	case AF_INET6:
4128 		{
4129 			/* IPv6 addresses */
4130 			struct sockaddr_in6 *sin6_1, *sin6_2;
4131 
4132 			sin6_1 = (struct sockaddr_in6 *)sa1;
4133 			sin6_2 = (struct sockaddr_in6 *)sa2;
4134 			return (SCTP6_ARE_ADDR_EQUAL(sin6_1,
4135 			    sin6_2));
4136 		}
4137 #endif
4138 	case AF_INET:
4139 		{
4140 			/* IPv4 addresses */
4141 			struct sockaddr_in *sin_1, *sin_2;
4142 
4143 			sin_1 = (struct sockaddr_in *)sa1;
4144 			sin_2 = (struct sockaddr_in *)sa2;
4145 			return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr);
4146 		}
4147 	default:
4148 		/* we don't do these... */
4149 		return (0);
4150 	}
4151 }
4152 
4153 void
4154 sctp_print_address(struct sockaddr *sa)
4155 {
4156 #ifdef INET6
4157 	char ip6buf[INET6_ADDRSTRLEN];
4158 
4159 	ip6buf[0] = 0;
4160 #endif
4161 
4162 	switch (sa->sa_family) {
4163 #ifdef INET6
4164 	case AF_INET6:
4165 		{
4166 			struct sockaddr_in6 *sin6;
4167 
4168 			sin6 = (struct sockaddr_in6 *)sa;
4169 			SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n",
4170 			    ip6_sprintf(ip6buf, &sin6->sin6_addr),
4171 			    ntohs(sin6->sin6_port),
4172 			    sin6->sin6_scope_id);
4173 			break;
4174 		}
4175 #endif
4176 	case AF_INET:
4177 		{
4178 			struct sockaddr_in *sin;
4179 			unsigned char *p;
4180 
4181 			sin = (struct sockaddr_in *)sa;
4182 			p = (unsigned char *)&sin->sin_addr;
4183 			SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n",
4184 			    p[0], p[1], p[2], p[3], ntohs(sin->sin_port));
4185 			break;
4186 		}
4187 	default:
4188 		SCTP_PRINTF("?\n");
4189 		break;
4190 	}
4191 }
4192 
4193 void
4194 sctp_print_address_pkt(struct ip *iph, struct sctphdr *sh)
4195 {
4196 	switch (iph->ip_v) {
4197 		case IPVERSION:
4198 		{
4199 			struct sockaddr_in lsa, fsa;
4200 
4201 			bzero(&lsa, sizeof(lsa));
4202 			lsa.sin_len = sizeof(lsa);
4203 			lsa.sin_family = AF_INET;
4204 			lsa.sin_addr = iph->ip_src;
4205 			lsa.sin_port = sh->src_port;
4206 			bzero(&fsa, sizeof(fsa));
4207 			fsa.sin_len = sizeof(fsa);
4208 			fsa.sin_family = AF_INET;
4209 			fsa.sin_addr = iph->ip_dst;
4210 			fsa.sin_port = sh->dest_port;
4211 			SCTP_PRINTF("src: ");
4212 			sctp_print_address((struct sockaddr *)&lsa);
4213 			SCTP_PRINTF("dest: ");
4214 			sctp_print_address((struct sockaddr *)&fsa);
4215 			break;
4216 		}
4217 #ifdef INET6
4218 	case IPV6_VERSION >> 4:
4219 		{
4220 			struct ip6_hdr *ip6;
4221 			struct sockaddr_in6 lsa6, fsa6;
4222 
4223 			ip6 = (struct ip6_hdr *)iph;
4224 			bzero(&lsa6, sizeof(lsa6));
4225 			lsa6.sin6_len = sizeof(lsa6);
4226 			lsa6.sin6_family = AF_INET6;
4227 			lsa6.sin6_addr = ip6->ip6_src;
4228 			lsa6.sin6_port = sh->src_port;
4229 			bzero(&fsa6, sizeof(fsa6));
4230 			fsa6.sin6_len = sizeof(fsa6);
4231 			fsa6.sin6_family = AF_INET6;
4232 			fsa6.sin6_addr = ip6->ip6_dst;
4233 			fsa6.sin6_port = sh->dest_port;
4234 			SCTP_PRINTF("src: ");
4235 			sctp_print_address((struct sockaddr *)&lsa6);
4236 			SCTP_PRINTF("dest: ");
4237 			sctp_print_address((struct sockaddr *)&fsa6);
4238 			break;
4239 		}
4240 #endif
4241 	default:
4242 		/* TSNH */
4243 		break;
4244 	}
4245 }
4246 
/*
 * Move every queued-to-read control structure belonging to stcb from the
 * old endpoint's read queue to the new endpoint's, transferring the
 * socket-buffer accounting from the old socket to the new one.  Used
 * when an association migrates endpoints (peeloff/accept path, per the
 * comment in the sblock-failure branch below).
 *
 * waitflags is passed straight to sblock() on the old socket's receive
 * buffer; on sblock failure the data is deliberately left in place.
 */
void
sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp,
    struct sctp_inpcb *new_inp,
    struct sctp_tcb *stcb,
    int waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;	/* staging queue between the two inps */
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	control = TAILQ_FIRST(&old_inp->read_queue);
	/* Pull off all for our target stcb */
	while (control) {
		/* save the next entry before unlinking control */
		nctl = TAILQ_NEXT(control, next);
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* release the old socket's sb accounting for each mbuf */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	control = TAILQ_FIRST(&tmp_queue);
	SCTP_INP_READ_LOCK(new_inp);
	while (control) {
		nctl = TAILQ_NEXT(control, next);
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* charge each mbuf to the new socket's receive buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
		control = nctl;
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}
4328 
/*
 * Append a fully-built control structure to the endpoint's read queue,
 * charging each of its mbufs to the socket buffer sb, and wake up any
 * reader.  Zero-length mbufs are pruned from the chain first; if the
 * whole chain collapses to nothing, the control is NOT queued.
 *
 * end:                when non-zero, mark the message complete (end_added).
 * inp_read_lock_held: caller already holds SCTP_INP_READ_LOCK.
 * so_locked:          (Apple/lock-testing builds) caller already holds
 *                     the socket lock.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!! */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	/* notifications are not counted as received messages */
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		/* charge this mbuf to the socket buffer */
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out?? */
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/*
				 * Drop the TCB lock to take the socket lock
				 * (lock ordering); hold a refcount across the
				 * gap and re-check that the socket did not
				 * disappear while unlocked.
				 */
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}
4437 
4438 
/*
 * Append mbuf chain m to an existing control structure that is being
 * filled (partial-delivery API or reassembly).  Zero-length mbufs are
 * pruned; the remaining bytes are charged to sb (when non-NULL) and
 * added to control->length.  Returns 0 on success, -1 when there is no
 * control, the control is already complete, or m is empty.
 *
 * end:         non-zero marks the message complete (clears the pdapi
 *              linkage and held_length, sets end_added).
 * ctls_cumack: stored into sinfo_tsn/sinfo_cumtsn -- per the comment
 *              below, during partial delivery this is the pd-api
 *              highest TSN for this mbuf, not the true cum-ack.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		/* common exit for the error cases; drops the read lock */
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			/* charge this mbuf to the socket buffer */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left? */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			/*
			 * Take the socket lock for the wakeup, dropping the
			 * TCB lock for ordering; hold a refcount across the
			 * gap and bail if the socket went away meanwhile.
			 */
			so = SCTP_INP_SO(inp);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}
4580 
4581 
4582 
4583 /*************HOLD THIS COMMENT FOR PATCH FILE OF
4584  *************ALTERNATE ROUTING CODE
4585  */
4586 
4587 /*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
4588  *************ALTERNATE ROUTING CODE
4589  */
4590 
4591 struct mbuf *
4592 sctp_generate_invmanparam(int err)
4593 {
4594 	/* Return a MBUF with a invalid mandatory parameter */
4595 	struct mbuf *m;
4596 
4597 	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
4598 	if (m) {
4599 		struct sctp_paramhdr *ph;
4600 
4601 		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
4602 		ph = mtod(m, struct sctp_paramhdr *);
4603 		ph->param_length = htons(sizeof(struct sctp_paramhdr));
4604 		ph->param_type = htons(err);
4605 	}
4606 	return (m);
4607 }
4608 
4609 #ifdef SCTP_MBCNT_LOGGING
4610 void
4611 sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
4612     struct sctp_tmit_chunk *tp1, int chk_cnt)
4613 {
4614 	if (tp1->data == NULL) {
4615 		return;
4616 	}
4617 	asoc->chunks_on_out_queue -= chk_cnt;
4618 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
4619 		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
4620 		    asoc->total_output_queue_size,
4621 		    tp1->book_size,
4622 		    0,
4623 		    tp1->mbcnt);
4624 	}
4625 	if (asoc->total_output_queue_size >= tp1->book_size) {
4626 		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
4627 	} else {
4628 		asoc->total_output_queue_size = 0;
4629 	}
4630 
4631 	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
4632 	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
4633 		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
4634 			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
4635 		} else {
4636 			stcb->sctp_socket->so_snd.sb_cc = 0;
4637 
4638 		}
4639 	}
4640 }
4641 
4642 #endif
4643 
4644 int
4645 sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
4646     int reason, int so_locked
4647 #if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
4648     SCTP_UNUSED
4649 #endif
4650 )
4651 {
4652 	struct sctp_stream_out *strq;
4653 	struct sctp_tmit_chunk *chk = NULL;
4654 	struct sctp_stream_queue_pending *sp;
4655 	uint16_t stream = 0, seq = 0;
4656 	uint8_t foundeom = 0;
4657 	int ret_sz = 0;
4658 	int notdone;
4659 	int do_wakeup_routine = 0;
4660 
4661 	stream = tp1->rec.data.stream_number;
4662 	seq = tp1->rec.data.stream_seq;
4663 	do {
4664 		ret_sz += tp1->book_size;
4665 		if (tp1->data != NULL) {
4666 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4667 				sctp_flight_size_decrease(tp1);
4668 				sctp_total_flight_decrease(stcb, tp1);
4669 			}
4670 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4671 			stcb->asoc.peers_rwnd += tp1->send_size;
4672 			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
4673 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4674 			if (tp1->data) {
4675 				sctp_m_freem(tp1->data);
4676 				tp1->data = NULL;
4677 			}
4678 			do_wakeup_routine = 1;
4679 			if (PR_SCTP_BUF_ENABLED(tp1->flags)) {
4680 				stcb->asoc.sent_queue_cnt_removeable--;
4681 			}
4682 		}
4683 		tp1->sent = SCTP_FORWARD_TSN_SKIP;
4684 		if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
4685 		    SCTP_DATA_NOT_FRAG) {
4686 			/* not frag'ed we ae done   */
4687 			notdone = 0;
4688 			foundeom = 1;
4689 		} else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4690 			/* end of frag, we are done */
4691 			notdone = 0;
4692 			foundeom = 1;
4693 		} else {
4694 			/*
4695 			 * Its a begin or middle piece, we must mark all of
4696 			 * it
4697 			 */
4698 			notdone = 1;
4699 			tp1 = TAILQ_NEXT(tp1, sctp_next);
4700 		}
4701 	} while (tp1 && notdone);
4702 	if (foundeom == 0) {
4703 		/*
4704 		 * The multi-part message was scattered across the send and
4705 		 * sent queue.
4706 		 */
4707 next_on_sent:
4708 		tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
4709 		/*
4710 		 * recurse throught the send_queue too, starting at the
4711 		 * beginning.
4712 		 */
4713 		if ((tp1) &&
4714 		    (tp1->rec.data.stream_number == stream) &&
4715 		    (tp1->rec.data.stream_seq == seq)
4716 		    ) {
4717 			/*
4718 			 * save to chk in case we have some on stream out
4719 			 * queue. If so and we have an un-transmitted one we
4720 			 * don't have to fudge the TSN.
4721 			 */
4722 			chk = tp1;
4723 			ret_sz += tp1->book_size;
4724 			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
4725 			sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
4726 			if (tp1->data) {
4727 				sctp_m_freem(tp1->data);
4728 				tp1->data = NULL;
4729 			}
4730 			/* No flight involved here book the size to 0 */
4731 			tp1->book_size = 0;
4732 			if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
4733 				foundeom = 1;
4734 			}
4735 			do_wakeup_routine = 1;
4736 			tp1->sent = SCTP_FORWARD_TSN_SKIP;
4737 			TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next);
4738 			/*
4739 			 * on to the sent queue so we can wait for it to be
4740 			 * passed by.
4741 			 */
4742 			TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1,
4743 			    sctp_next);
4744 			stcb->asoc.send_queue_cnt--;
4745 			stcb->asoc.sent_queue_cnt++;
4746 			goto next_on_sent;
4747 		}
4748 	}
4749 	if (foundeom == 0) {
4750 		/*
4751 		 * Still no eom found. That means there is stuff left on the
4752 		 * stream out queue.. yuck.
4753 		 */
4754 		strq = &stcb->asoc.strmout[stream];
4755 		SCTP_TCB_SEND_LOCK(stcb);
4756 		sp = TAILQ_FIRST(&strq->outqueue);
4757 		while (sp->strseq <= seq) {
4758 			/* Check if its our SEQ */
4759 			if (sp->strseq == seq) {
4760 				sp->discard_rest = 1;
4761 				/*
4762 				 * We may need to put a chunk on the queue
4763 				 * that holds the TSN that would have been
4764 				 * sent with the LAST bit.
4765 				 */
4766 				if (chk == NULL) {
4767 					/* Yep, we have to */
4768 					sctp_alloc_a_chunk(stcb, chk);
4769 					if (chk == NULL) {
4770 						/*
4771 						 * we are hosed. All we can
4772 						 * do is nothing.. which
4773 						 * will cause an abort if
4774 						 * the peer is paying
4775 						 * attention.
4776 						 */
4777 						goto oh_well;
4778 					}
4779 					memset(chk, 0, sizeof(*chk));
4780 					chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG;
4781 					chk->sent = SCTP_FORWARD_TSN_SKIP;
4782 					chk->asoc = &stcb->asoc;
4783 					chk->rec.data.stream_seq = sp->strseq;
4784 					chk->rec.data.stream_number = sp->stream;
4785 					chk->rec.data.payloadtype = sp->ppid;
4786 					chk->rec.data.context = sp->context;
4787 					chk->flags = sp->act_flags;
4788 					chk->whoTo = sp->net;
4789 					atomic_add_int(&chk->whoTo->ref_count, 1);
4790 					chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1);
4791 					stcb->asoc.pr_sctp_cnt++;
4792 					chk->pr_sctp_on = 1;
4793 					TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
4794 					stcb->asoc.sent_queue_cnt++;
4795 					stcb->asoc.pr_sctp_cnt++;
4796 				} else {
4797 					chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
4798 				}
4799 		oh_well:
4800 				if (sp->data) {
4801 					/*
4802 					 * Pull any data to free up the SB
4803 					 * and allow sender to "add more"
4804 					 * whilc we will throw away :-)
4805 					 */
4806 					sctp_free_spbufspace(stcb, &stcb->asoc,
4807 					    sp);
4808 					ret_sz += sp->length;
4809 					do_wakeup_routine = 1;
4810 					sp->some_taken = 1;
4811 					sctp_m_freem(sp->data);
4812 					sp->length = 0;
4813 					sp->data = NULL;
4814 					sp->tail_mbuf = NULL;
4815 				}
4816 				break;
4817 			} else {
4818 				/* Next one please */
4819 				sp = TAILQ_NEXT(sp, next);
4820 			}
4821 		}		/* End while */
4822 		SCTP_TCB_SEND_UNLOCK(stcb);
4823 	}
4824 	if (do_wakeup_routine) {
4825 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4826 		struct socket *so;
4827 
4828 		so = SCTP_INP_SO(stcb->sctp_ep);
4829 		if (!so_locked) {
4830 			atomic_add_int(&stcb->asoc.refcnt, 1);
4831 			SCTP_TCB_UNLOCK(stcb);
4832 			SCTP_SOCKET_LOCK(so, 1);
4833 			SCTP_TCB_LOCK(stcb);
4834 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
4835 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4836 				/* assoc was freed while we were unlocked */
4837 				SCTP_SOCKET_UNLOCK(so, 1);
4838 				return (ret_sz);
4839 			}
4840 		}
4841 #endif
4842 		sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket);
4843 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4844 		if (!so_locked) {
4845 			SCTP_SOCKET_UNLOCK(so, 1);
4846 		}
4847 #endif
4848 	}
4849 	return (ret_sz);
4850 }
4851 
4852 /*
4853  * checks to see if the given address, sa, is one that is currently known by
4854  * the kernel note: can't distinguish the same address on multiple interfaces
4855  * and doesn't handle multiple addresses with different zone/scope id's note:
4856  * ifa_ifwithaddr() compares the entire sockaddr struct
4857  */
4858 struct sctp_ifa *
4859 sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr,
4860     int holds_lock)
4861 {
4862 	struct sctp_laddr *laddr;
4863 
4864 	if (holds_lock == 0) {
4865 		SCTP_INP_RLOCK(inp);
4866 	}
4867 	LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4868 		if (laddr->ifa == NULL)
4869 			continue;
4870 		if (addr->sa_family != laddr->ifa->address.sa.sa_family)
4871 			continue;
4872 		if (addr->sa_family == AF_INET) {
4873 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4874 			    laddr->ifa->address.sin.sin_addr.s_addr) {
4875 				/* found him. */
4876 				if (holds_lock == 0) {
4877 					SCTP_INP_RUNLOCK(inp);
4878 				}
4879 				return (laddr->ifa);
4880 				break;
4881 			}
4882 		}
4883 #ifdef INET6
4884 		if (addr->sa_family == AF_INET6) {
4885 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4886 			    &laddr->ifa->address.sin6)) {
4887 				/* found him. */
4888 				if (holds_lock == 0) {
4889 					SCTP_INP_RUNLOCK(inp);
4890 				}
4891 				return (laddr->ifa);
4892 				break;
4893 			}
4894 		}
4895 #endif
4896 	}
4897 	if (holds_lock == 0) {
4898 		SCTP_INP_RUNLOCK(inp);
4899 	}
4900 	return (NULL);
4901 }
4902 
4903 uint32_t
4904 sctp_get_ifa_hash_val(struct sockaddr *addr)
4905 {
4906 	if (addr->sa_family == AF_INET) {
4907 		struct sockaddr_in *sin;
4908 
4909 		sin = (struct sockaddr_in *)addr;
4910 		return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
4911 	} else if (addr->sa_family == AF_INET6) {
4912 		struct sockaddr_in6 *sin6;
4913 		uint32_t hash_of_addr;
4914 
4915 		sin6 = (struct sockaddr_in6 *)addr;
4916 		hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
4917 		    sin6->sin6_addr.s6_addr32[1] +
4918 		    sin6->sin6_addr.s6_addr32[2] +
4919 		    sin6->sin6_addr.s6_addr32[3]);
4920 		hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
4921 		return (hash_of_addr);
4922 	}
4923 	return (0);
4924 }
4925 
4926 struct sctp_ifa *
4927 sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
4928 {
4929 	struct sctp_ifa *sctp_ifap;
4930 	struct sctp_vrf *vrf;
4931 	struct sctp_ifalist *hash_head;
4932 	uint32_t hash_of_addr;
4933 
4934 	if (holds_lock == 0)
4935 		SCTP_IPI_ADDR_RLOCK();
4936 
4937 	vrf = sctp_find_vrf(vrf_id);
4938 	if (vrf == NULL) {
4939 stage_right:
4940 		if (holds_lock == 0)
4941 			SCTP_IPI_ADDR_RUNLOCK();
4942 		return (NULL);
4943 	}
4944 	hash_of_addr = sctp_get_ifa_hash_val(addr);
4945 
4946 	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
4947 	if (hash_head == NULL) {
4948 		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
4949 		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
4950 		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
4951 		sctp_print_address(addr);
4952 		SCTP_PRINTF("No such bucket for address\n");
4953 		if (holds_lock == 0)
4954 			SCTP_IPI_ADDR_RUNLOCK();
4955 
4956 		return (NULL);
4957 	}
4958 	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
4959 		if (sctp_ifap == NULL) {
4960 #ifdef INVARIANTS
4961 			panic("Huh LIST_FOREACH corrupt");
4962 			goto stage_right;
4963 #else
4964 			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
4965 			goto stage_right;
4966 #endif
4967 		}
4968 		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
4969 			continue;
4970 		if (addr->sa_family == AF_INET) {
4971 			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
4972 			    sctp_ifap->address.sin.sin_addr.s_addr) {
4973 				/* found him. */
4974 				if (holds_lock == 0)
4975 					SCTP_IPI_ADDR_RUNLOCK();
4976 				return (sctp_ifap);
4977 				break;
4978 			}
4979 		}
4980 #ifdef INET6
4981 		if (addr->sa_family == AF_INET6) {
4982 			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
4983 			    &sctp_ifap->address.sin6)) {
4984 				/* found him. */
4985 				if (holds_lock == 0)
4986 					SCTP_IPI_ADDR_RUNLOCK();
4987 				return (sctp_ifap);
4988 				break;
4989 			}
4990 		}
4991 #endif
4992 	}
4993 	if (holds_lock == 0)
4994 		SCTP_IPI_ADDR_RUNLOCK();
4995 	return (NULL);
4996 }
4997 
/*
 * Called after the application has pulled data from the receive socket
 * buffer.  Folds the freed byte count into the tcb and, when the receive
 * window has opened by at least rwnd_req bytes since the last report,
 * sends a window-update SACK (or NR-SACK) and kicks chunk output.
 *
 * freed_so_far: in/out - bytes freed by the caller; zeroed here once
 *               accounted into stcb->freed_by_sorcv_sincelast.
 * hold_rlock:   non-zero if the caller holds the endpoint's read-queue
 *               lock; it is dropped around the send and re-taken before
 *               returning to the caller.
 * rwnd_req:     threshold of newly opened window that justifies a SACK.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update? */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* Hold a reference so the association cannot be freed under us. */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	/* Also pin the endpoint before looking at its socket. */
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	/* Account what the caller freed into the running total. */
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* Window opened enough since the last report: tell the peer. */
		if (hold_rlock) {
			/* Drop the caller's read lock across the send path. */
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		/* Re-check under the TCB lock: the assoc may be going away. */
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		/*
		 * EY if nr_sacks used then send an nr-sack , a sack
		 * otherwise
		 */
		if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
			sctp_send_nr_sack(stcb);
		else
			sctp_send_sack(stcb);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* Re-take the read lock if we dropped it on the caller's behalf. */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	/* Drop the association reference taken on entry. */
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}
5086 
5087 int
5088 sctp_sorecvmsg(struct socket *so,
5089     struct uio *uio,
5090     struct mbuf **mp,
5091     struct sockaddr *from,
5092     int fromlen,
5093     int *msg_flags,
5094     struct sctp_sndrcvinfo *sinfo,
5095     int filling_sinfo)
5096 {
5097 	/*
5098 	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
5099 	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
5100 	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5101 	 * On the way out we may send out any combination of:
5102 	 * MSG_NOTIFICATION MSG_EOR
5103 	 *
5104 	 */
5105 	struct sctp_inpcb *inp = NULL;
5106 	int my_len = 0;
5107 	int cp_len = 0, error = 0;
5108 	struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL;
5109 	struct mbuf *m = NULL, *embuf = NULL;
5110 	struct sctp_tcb *stcb = NULL;
5111 	int wakeup_read_socket = 0;
5112 	int freecnt_applied = 0;
5113 	int out_flags = 0, in_flags = 0;
5114 	int block_allowed = 1;
5115 	uint32_t freed_so_far = 0;
5116 	uint32_t copied_so_far = 0;
5117 	int in_eeor_mode = 0;
5118 	int no_rcv_needed = 0;
5119 	uint32_t rwnd_req = 0;
5120 	int hold_sblock = 0;
5121 	int hold_rlock = 0;
5122 	int slen = 0;
5123 	uint32_t held_length = 0;
5124 	int sockbuf_lock = 0;
5125 
5126 	if (uio == NULL) {
5127 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5128 		return (EINVAL);
5129 	}
5130 	if (msg_flags) {
5131 		in_flags = *msg_flags;
5132 		if (in_flags & MSG_PEEK)
5133 			SCTP_STAT_INCR(sctps_read_peeks);
5134 	} else {
5135 		in_flags = 0;
5136 	}
5137 	slen = uio->uio_resid;
5138 
5139 	/* Pull in and set up our int flags */
5140 	if (in_flags & MSG_OOB) {
5141 		/* Out of band's NOT supported */
5142 		return (EOPNOTSUPP);
5143 	}
5144 	if ((in_flags & MSG_PEEK) && (mp != NULL)) {
5145 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
5146 		return (EINVAL);
5147 	}
5148 	if ((in_flags & (MSG_DONTWAIT
5149 	    | MSG_NBIO
5150 	    )) ||
5151 	    SCTP_SO_IS_NBIO(so)) {
5152 		block_allowed = 0;
5153 	}
5154 	/* setup the endpoint */
5155 	inp = (struct sctp_inpcb *)so->so_pcb;
5156 	if (inp == NULL) {
5157 		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT);
5158 		return (EFAULT);
5159 	}
5160 	rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT);
5161 	/* Must be at least a MTU's worth */
5162 	if (rwnd_req < SCTP_MIN_RWND)
5163 		rwnd_req = SCTP_MIN_RWND;
5164 	in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
5165 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5166 		sctp_misc_ints(SCTP_SORECV_ENTER,
5167 		    rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
5168 	}
5169 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
5170 		sctp_misc_ints(SCTP_SORECV_ENTERPL,
5171 		    rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
5172 	}
5173 	error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
5174 	sockbuf_lock = 1;
5175 	if (error) {
5176 		goto release_unlocked;
5177 	}
5178 restart:
5179 
5180 
5181 restart_nosblocks:
5182 	if (hold_sblock == 0) {
5183 		SOCKBUF_LOCK(&so->so_rcv);
5184 		hold_sblock = 1;
5185 	}
5186 	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
5187 	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
5188 		goto out;
5189 	}
5190 	if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5191 		if (so->so_error) {
5192 			error = so->so_error;
5193 			if ((in_flags & MSG_PEEK) == 0)
5194 				so->so_error = 0;
5195 			goto out;
5196 		} else {
5197 			if (so->so_rcv.sb_cc == 0) {
5198 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5199 				/* indicate EOF */
5200 				error = 0;
5201 				goto out;
5202 			}
5203 		}
5204 	}
5205 	if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
5206 		/* we need to wait for data */
5207 		if ((so->so_rcv.sb_cc == 0) &&
5208 		    ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5209 		    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
5210 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5211 				/*
5212 				 * For active open side clear flags for
5213 				 * re-use passive open is blocked by
5214 				 * connect.
5215 				 */
5216 				if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5217 					/*
5218 					 * You were aborted, passive side
5219 					 * always hits here
5220 					 */
5221 					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5222 					error = ECONNRESET;
5223 					/*
5224 					 * You get this once if you are
5225 					 * active open side
5226 					 */
5227 					if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5228 						/*
5229 						 * Remove flag if on the
5230 						 * active open side
5231 						 */
5232 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5233 					}
5234 				}
5235 				so->so_state &= ~(SS_ISCONNECTING |
5236 				    SS_ISDISCONNECTING |
5237 				    SS_ISCONFIRMING |
5238 				    SS_ISCONNECTED);
5239 				if (error == 0) {
5240 					if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5241 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5242 						error = ENOTCONN;
5243 					} else {
5244 						inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5245 					}
5246 				}
5247 				goto out;
5248 			}
5249 		}
5250 		error = sbwait(&so->so_rcv);
5251 		if (error) {
5252 			goto out;
5253 		}
5254 		held_length = 0;
5255 		goto restart_nosblocks;
5256 	} else if (so->so_rcv.sb_cc == 0) {
5257 		if (so->so_error) {
5258 			error = so->so_error;
5259 			if ((in_flags & MSG_PEEK) == 0)
5260 				so->so_error = 0;
5261 		} else {
5262 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
5263 			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5264 				if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
5265 					/*
5266 					 * For active open side clear flags
5267 					 * for re-use passive open is
5268 					 * blocked by connect.
5269 					 */
5270 					if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) {
5271 						/*
5272 						 * You were aborted, passive
5273 						 * side always hits here
5274 						 */
5275 						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
5276 						error = ECONNRESET;
5277 						/*
5278 						 * You get this once if you
5279 						 * are active open side
5280 						 */
5281 						if (!(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
5282 							/*
5283 							 * Remove flag if on
5284 							 * the active open
5285 							 * side
5286 							 */
5287 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_ABORTED;
5288 						}
5289 					}
5290 					so->so_state &= ~(SS_ISCONNECTING |
5291 					    SS_ISDISCONNECTING |
5292 					    SS_ISCONFIRMING |
5293 					    SS_ISCONNECTED);
5294 					if (error == 0) {
5295 						if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) {
5296 							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN);
5297 							error = ENOTCONN;
5298 						} else {
5299 							inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAS_CONNECTED;
5300 						}
5301 					}
5302 					goto out;
5303 				}
5304 			}
5305 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK);
5306 			error = EWOULDBLOCK;
5307 		}
5308 		goto out;
5309 	}
5310 	if (hold_sblock == 1) {
5311 		SOCKBUF_UNLOCK(&so->so_rcv);
5312 		hold_sblock = 0;
5313 	}
5314 	/* we possibly have data we can read */
5315 	/* sa_ignore FREED_MEMORY */
5316 	control = TAILQ_FIRST(&inp->read_queue);
5317 	if (control == NULL) {
5318 		/*
5319 		 * This could be happening since the appender did the
5320 		 * increment but as not yet did the tailq insert onto the
5321 		 * read_queue
5322 		 */
5323 		if (hold_rlock == 0) {
5324 			SCTP_INP_READ_LOCK(inp);
5325 			hold_rlock = 1;
5326 		}
5327 		control = TAILQ_FIRST(&inp->read_queue);
5328 		if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
5329 #ifdef INVARIANTS
5330 			panic("Huh, its non zero and nothing on control?");
5331 #endif
5332 			so->so_rcv.sb_cc = 0;
5333 		}
5334 		SCTP_INP_READ_UNLOCK(inp);
5335 		hold_rlock = 0;
5336 		goto restart;
5337 	}
5338 	if ((control->length == 0) &&
5339 	    (control->do_not_ref_stcb)) {
5340 		/*
5341 		 * Clean up code for freeing assoc that left behind a
5342 		 * pdapi.. maybe a peer in EEOR that just closed after
5343 		 * sending and never indicated a EOR.
5344 		 */
5345 		if (hold_rlock == 0) {
5346 			hold_rlock = 1;
5347 			SCTP_INP_READ_LOCK(inp);
5348 		}
5349 		control->held_length = 0;
5350 		if (control->data) {
5351 			/* Hmm there is data here .. fix */
5352 			struct mbuf *m_tmp;
5353 			int cnt = 0;
5354 
5355 			m_tmp = control->data;
5356 			while (m_tmp) {
5357 				cnt += SCTP_BUF_LEN(m_tmp);
5358 				if (SCTP_BUF_NEXT(m_tmp) == NULL) {
5359 					control->tail_mbuf = m_tmp;
5360 					control->end_added = 1;
5361 				}
5362 				m_tmp = SCTP_BUF_NEXT(m_tmp);
5363 			}
5364 			control->length = cnt;
5365 		} else {
5366 			/* remove it */
5367 			TAILQ_REMOVE(&inp->read_queue, control, next);
5368 			/* Add back any hiddend data */
5369 			sctp_free_remote_addr(control->whoFrom);
5370 			sctp_free_a_readq(stcb, control);
5371 		}
5372 		if (hold_rlock) {
5373 			hold_rlock = 0;
5374 			SCTP_INP_READ_UNLOCK(inp);
5375 		}
5376 		goto restart;
5377 	}
5378 	if ((control->length == 0) &&
5379 	    (control->end_added == 1)) {
5380 		/*
5381 		 * Do we also need to check for (control->pdapi_aborted ==
5382 		 * 1)?
5383 		 */
5384 		if (hold_rlock == 0) {
5385 			hold_rlock = 1;
5386 			SCTP_INP_READ_LOCK(inp);
5387 		}
5388 		TAILQ_REMOVE(&inp->read_queue, control, next);
5389 		if (control->data) {
5390 #ifdef INVARIANTS
5391 			panic("control->data not null but control->length == 0");
5392 #else
5393 			SCTP_PRINTF("Strange, data left in the control buffer. Cleaning up.\n");
5394 			sctp_m_freem(control->data);
5395 			control->data = NULL;
5396 #endif
5397 		}
5398 		if (control->aux_data) {
5399 			sctp_m_free(control->aux_data);
5400 			control->aux_data = NULL;
5401 		}
5402 		sctp_free_remote_addr(control->whoFrom);
5403 		sctp_free_a_readq(stcb, control);
5404 		if (hold_rlock) {
5405 			hold_rlock = 0;
5406 			SCTP_INP_READ_UNLOCK(inp);
5407 		}
5408 		goto restart;
5409 	}
5410 	if (control->length == 0) {
5411 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) &&
5412 		    (filling_sinfo)) {
5413 			/* find a more suitable one then this */
5414 			ctl = TAILQ_NEXT(control, next);
5415 			while (ctl) {
5416 				if ((ctl->stcb != control->stcb) && (ctl->length) &&
5417 				    (ctl->some_taken ||
5418 				    (ctl->spec_flags & M_NOTIFICATION) ||
5419 				    ((ctl->do_not_ref_stcb == 0) &&
5420 				    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5421 				    ) {
5422 					/*-
5423 					 * If we have a different TCB next, and there is data
5424 					 * present. If we have already taken some (pdapi), OR we can
5425 					 * ref the tcb and no delivery as started on this stream, we
5426 					 * take it. Note we allow a notification on a different
5427 					 * assoc to be delivered..
5428 					 */
5429 					control = ctl;
5430 					goto found_one;
5431 				} else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) &&
5432 					    (ctl->length) &&
5433 					    ((ctl->some_taken) ||
5434 					    ((ctl->do_not_ref_stcb == 0) &&
5435 					    ((ctl->spec_flags & M_NOTIFICATION) == 0) &&
5436 					    (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
5437 				    ) {
5438 					/*-
5439 					 * If we have the same tcb, and there is data present, and we
5440 					 * have the strm interleave feature present. Then if we have
5441 					 * taken some (pdapi) or we can refer to tht tcb AND we have
5442 					 * not started a delivery for this stream, we can take it.
5443 					 * Note we do NOT allow a notificaiton on the same assoc to
5444 					 * be delivered.
5445 					 */
5446 					control = ctl;
5447 					goto found_one;
5448 				}
5449 				ctl = TAILQ_NEXT(ctl, next);
5450 			}
5451 		}
5452 		/*
5453 		 * if we reach here, not suitable replacement is available
5454 		 * <or> fragment interleave is NOT on. So stuff the sb_cc
5455 		 * into the our held count, and its time to sleep again.
5456 		 */
5457 		held_length = so->so_rcv.sb_cc;
5458 		control->held_length = so->so_rcv.sb_cc;
5459 		goto restart;
5460 	}
5461 	/* Clear the held length since there is something to read */
5462 	control->held_length = 0;
5463 	if (hold_rlock) {
5464 		SCTP_INP_READ_UNLOCK(inp);
5465 		hold_rlock = 0;
5466 	}
5467 found_one:
5468 	/*
5469 	 * If we reach here, control has a some data for us to read off.
5470 	 * Note that stcb COULD be NULL.
5471 	 */
5472 	control->some_taken++;
5473 	if (hold_sblock) {
5474 		SOCKBUF_UNLOCK(&so->so_rcv);
5475 		hold_sblock = 0;
5476 	}
5477 	stcb = control->stcb;
5478 	if (stcb) {
5479 		if ((control->do_not_ref_stcb == 0) &&
5480 		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
5481 			if (freecnt_applied == 0)
5482 				stcb = NULL;
5483 		} else if (control->do_not_ref_stcb == 0) {
5484 			/* you can't free it on me please */
5485 			/*
5486 			 * The lock on the socket buffer protects us so the
5487 			 * free code will stop. But since we used the
5488 			 * socketbuf lock and the sender uses the tcb_lock
5489 			 * to increment, we need to use the atomic add to
5490 			 * the refcnt
5491 			 */
5492 			if (freecnt_applied) {
5493 #ifdef INVARIANTS
5494 				panic("refcnt already incremented");
5495 #else
5496 				printf("refcnt already incremented?\n");
5497 #endif
5498 			} else {
5499 				atomic_add_int(&stcb->asoc.refcnt, 1);
5500 				freecnt_applied = 1;
5501 			}
5502 			/*
5503 			 * Setup to remember how much we have not yet told
5504 			 * the peer our rwnd has opened up. Note we grab the
5505 			 * value from the tcb from last time. Note too that
5506 			 * sack sending clears this when a sack is sent,
5507 			 * which is fine. Once we hit the rwnd_req, we then
5508 			 * will go to the sctp_user_rcvd() that will not
5509 			 * lock until it KNOWs it MUST send a WUP-SACK.
5510 			 */
5511 			freed_so_far = stcb->freed_by_sorcv_sincelast;
5512 			stcb->freed_by_sorcv_sincelast = 0;
5513 		}
5514 	}
5515 	if (stcb &&
5516 	    ((control->spec_flags & M_NOTIFICATION) == 0) &&
5517 	    control->do_not_ref_stcb == 0) {
5518 		stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
5519 	}
5520 	/* First lets get off the sinfo and sockaddr info */
5521 	if ((sinfo) && filling_sinfo) {
5522 		memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
5523 		nxt = TAILQ_NEXT(control, next);
5524 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
5525 			struct sctp_extrcvinfo *s_extra;
5526 
5527 			s_extra = (struct sctp_extrcvinfo *)sinfo;
5528 			if ((nxt) &&
5529 			    (nxt->length)) {
5530 				s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL;
5531 				if (nxt->sinfo_flags & SCTP_UNORDERED) {
5532 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED;
5533 				}
5534 				if (nxt->spec_flags & M_NOTIFICATION) {
5535 					s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION;
5536 				}
5537 				s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id;
5538 				s_extra->sreinfo_next_length = nxt->length;
5539 				s_extra->sreinfo_next_ppid = nxt->sinfo_ppid;
5540 				s_extra->sreinfo_next_stream = nxt->sinfo_stream;
5541 				if (nxt->tail_mbuf != NULL) {
5542 					if (nxt->end_added) {
5543 						s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE;
5544 					}
5545 				}
5546 			} else {
5547 				/*
5548 				 * we explicitly 0 this, since the memcpy
5549 				 * got some other things beyond the older
5550 				 * sinfo_ that is on the control's structure
5551 				 * :-D
5552 				 */
5553 				nxt = NULL;
5554 				s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
5555 				s_extra->sreinfo_next_aid = 0;
5556 				s_extra->sreinfo_next_length = 0;
5557 				s_extra->sreinfo_next_ppid = 0;
5558 				s_extra->sreinfo_next_stream = 0;
5559 			}
5560 		}
5561 		/*
5562 		 * update off the real current cum-ack, if we have an stcb.
5563 		 */
5564 		if ((control->do_not_ref_stcb == 0) && stcb)
5565 			sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
5566 		/*
5567 		 * mask off the high bits, we keep the actual chunk bits in
5568 		 * there.
5569 		 */
5570 		sinfo->sinfo_flags &= 0x00ff;
5571 		if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) {
5572 			sinfo->sinfo_flags |= SCTP_UNORDERED;
5573 		}
5574 	}
5575 #ifdef SCTP_ASOCLOG_OF_TSNS
5576 	{
5577 		int index, newindex;
5578 		struct sctp_pcbtsn_rlog *entry;
5579 
5580 		do {
5581 			index = inp->readlog_index;
5582 			newindex = index + 1;
5583 			if (newindex >= SCTP_READ_LOG_SIZE) {
5584 				newindex = 0;
5585 			}
5586 		} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
5587 		entry = &inp->readlog[index];
5588 		entry->vtag = control->sinfo_assoc_id;
5589 		entry->strm = control->sinfo_stream;
5590 		entry->seq = control->sinfo_ssn;
5591 		entry->sz = control->length;
5592 		entry->flgs = control->sinfo_flags;
5593 	}
5594 #endif
5595 	if (fromlen && from) {
5596 		struct sockaddr *to;
5597 
5598 #ifdef INET
5599 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin.sin_len);
5600 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5601 		((struct sockaddr_in *)from)->sin_port = control->port_from;
5602 #else
5603 		/* No AF_INET use AF_INET6 */
5604 		cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sin6.sin6_len);
5605 		memcpy(from, &control->whoFrom->ro._l_addr, cp_len);
5606 		((struct sockaddr_in6 *)from)->sin6_port = control->port_from;
5607 #endif
5608 
5609 		to = from;
5610 #if defined(INET) && defined(INET6)
5611 		if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
5612 		    (to->sa_family == AF_INET) &&
5613 		    ((size_t)fromlen >= sizeof(struct sockaddr_in6))) {
5614 			struct sockaddr_in *sin;
5615 			struct sockaddr_in6 sin6;
5616 
5617 			sin = (struct sockaddr_in *)to;
5618 			bzero(&sin6, sizeof(sin6));
5619 			sin6.sin6_family = AF_INET6;
5620 			sin6.sin6_len = sizeof(struct sockaddr_in6);
5621 			sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
5622 			bcopy(&sin->sin_addr,
5623 			    &sin6.sin6_addr.s6_addr32[3],
5624 			    sizeof(sin6.sin6_addr.s6_addr32[3]));
5625 			sin6.sin6_port = sin->sin_port;
5626 			memcpy(from, (caddr_t)&sin6, sizeof(sin6));
5627 		}
5628 #endif
5629 #if defined(INET6)
5630 		{
5631 			struct sockaddr_in6 lsa6, *to6;
5632 
5633 			to6 = (struct sockaddr_in6 *)to;
5634 			sctp_recover_scope_mac(to6, (&lsa6));
5635 		}
5636 #endif
5637 	}
5638 	/* now copy out what data we can */
5639 	if (mp == NULL) {
5640 		/* copy out each mbuf in the chain up to length */
5641 get_more_data:
5642 		m = control->data;
5643 		while (m) {
5644 			/* Move out all we can */
5645 			cp_len = (int)uio->uio_resid;
5646 			my_len = (int)SCTP_BUF_LEN(m);
5647 			if (cp_len > my_len) {
5648 				/* not enough in this buf */
5649 				cp_len = my_len;
5650 			}
5651 			if (hold_rlock) {
5652 				SCTP_INP_READ_UNLOCK(inp);
5653 				hold_rlock = 0;
5654 			}
5655 			if (cp_len > 0)
5656 				error = uiomove(mtod(m, char *), cp_len, uio);
5657 			/* re-read */
5658 			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
5659 				goto release;
5660 			}
5661 			if ((control->do_not_ref_stcb == 0) && stcb &&
5662 			    stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
5663 				no_rcv_needed = 1;
5664 			}
5665 			if (error) {
5666 				/* error we are out of here */
5667 				goto release;
5668 			}
5669 			if ((SCTP_BUF_NEXT(m) == NULL) &&
5670 			    (cp_len >= SCTP_BUF_LEN(m)) &&
5671 			    ((control->end_added == 0) ||
5672 			    (control->end_added &&
5673 			    (TAILQ_NEXT(control, next) == NULL)))
5674 			    ) {
5675 				SCTP_INP_READ_LOCK(inp);
5676 				hold_rlock = 1;
5677 			}
5678 			if (cp_len == SCTP_BUF_LEN(m)) {
5679 				if ((SCTP_BUF_NEXT(m) == NULL) &&
5680 				    (control->end_added)) {
5681 					out_flags |= MSG_EOR;
5682 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5683 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5684 				}
5685 				if (control->spec_flags & M_NOTIFICATION) {
5686 					out_flags |= MSG_NOTIFICATION;
5687 				}
5688 				/* we ate up the mbuf */
5689 				if (in_flags & MSG_PEEK) {
5690 					/* just looking */
5691 					m = SCTP_BUF_NEXT(m);
5692 					copied_so_far += cp_len;
5693 				} else {
5694 					/* dispose of the mbuf */
5695 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5696 						sctp_sblog(&so->so_rcv,
5697 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5698 					}
5699 					sctp_sbfree(control, stcb, &so->so_rcv, m);
5700 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5701 						sctp_sblog(&so->so_rcv,
5702 						    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5703 					}
5704 					embuf = m;
5705 					copied_so_far += cp_len;
5706 					freed_so_far += cp_len;
5707 					freed_so_far += MSIZE;
5708 					atomic_subtract_int(&control->length, cp_len);
5709 					control->data = sctp_m_free(m);
5710 					m = control->data;
5711 					/*
5712 					 * been through it all, must hold sb
5713 					 * lock ok to null tail
5714 					 */
5715 					if (control->data == NULL) {
5716 #ifdef INVARIANTS
5717 						if ((control->end_added == 0) ||
5718 						    (TAILQ_NEXT(control, next) == NULL)) {
5719 							/*
5720 							 * If the end is not
5721 							 * added, OR the
5722 							 * next is NOT null
5723 							 * we MUST have the
5724 							 * lock.
5725 							 */
5726 							if (mtx_owned(&inp->inp_rdata_mtx) == 0) {
5727 								panic("Hmm we don't own the lock?");
5728 							}
5729 						}
5730 #endif
5731 						control->tail_mbuf = NULL;
5732 #ifdef INVARIANTS
5733 						if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) {
5734 							panic("end_added, nothing left and no MSG_EOR");
5735 						}
5736 #endif
5737 					}
5738 				}
5739 			} else {
5740 				/* Do we need to trim the mbuf? */
5741 				if (control->spec_flags & M_NOTIFICATION) {
5742 					out_flags |= MSG_NOTIFICATION;
5743 				}
5744 				if ((in_flags & MSG_PEEK) == 0) {
5745 					SCTP_BUF_RESV_UF(m, cp_len);
5746 					SCTP_BUF_LEN(m) -= cp_len;
5747 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5748 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
5749 					}
5750 					atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
5751 					if ((control->do_not_ref_stcb == 0) &&
5752 					    stcb) {
5753 						atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
5754 					}
5755 					copied_so_far += cp_len;
5756 					embuf = m;
5757 					freed_so_far += cp_len;
5758 					freed_so_far += MSIZE;
5759 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5760 						sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
5761 						    SCTP_LOG_SBRESULT, 0);
5762 					}
5763 					atomic_subtract_int(&control->length, cp_len);
5764 				} else {
5765 					copied_so_far += cp_len;
5766 				}
5767 			}
5768 			if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) {
5769 				break;
5770 			}
5771 			if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5772 			    (control->do_not_ref_stcb == 0) &&
5773 			    (freed_so_far >= rwnd_req)) {
5774 				sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5775 			}
5776 		}		/* end while(m) */
5777 		/*
5778 		 * At this point we have looked at it all and we either have
5779 		 * a MSG_EOR/or read all the user wants... <OR>
5780 		 * control->length == 0.
5781 		 */
5782 		if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) {
5783 			/* we are done with this control */
5784 			if (control->length == 0) {
5785 				if (control->data) {
5786 #ifdef INVARIANTS
5787 					panic("control->data not null at read eor?");
5788 #else
5789 					SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n");
5790 					sctp_m_freem(control->data);
5791 					control->data = NULL;
5792 #endif
5793 				}
5794 		done_with_control:
5795 				if (TAILQ_NEXT(control, next) == NULL) {
5796 					/*
5797 					 * If we don't have a next we need a
5798 					 * lock, if there is a next
5799 					 * interrupt is filling ahead of us
5800 					 * and we don't need a lock to
5801 					 * remove this guy (which is the
5802 					 * head of the queue).
5803 					 */
5804 					if (hold_rlock == 0) {
5805 						SCTP_INP_READ_LOCK(inp);
5806 						hold_rlock = 1;
5807 					}
5808 				}
5809 				TAILQ_REMOVE(&inp->read_queue, control, next);
5810 				/* Add back any hiddend data */
5811 				if (control->held_length) {
5812 					held_length = 0;
5813 					control->held_length = 0;
5814 					wakeup_read_socket = 1;
5815 				}
5816 				if (control->aux_data) {
5817 					sctp_m_free(control->aux_data);
5818 					control->aux_data = NULL;
5819 				}
5820 				no_rcv_needed = control->do_not_ref_stcb;
5821 				sctp_free_remote_addr(control->whoFrom);
5822 				control->data = NULL;
5823 				sctp_free_a_readq(stcb, control);
5824 				control = NULL;
5825 				if ((freed_so_far >= rwnd_req) &&
5826 				    (no_rcv_needed == 0))
5827 					sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5828 
5829 			} else {
5830 				/*
5831 				 * The user did not read all of this
5832 				 * message, turn off the returned MSG_EOR
5833 				 * since we are leaving more behind on the
5834 				 * control to read.
5835 				 */
5836 #ifdef INVARIANTS
5837 				if (control->end_added &&
5838 				    (control->data == NULL) &&
5839 				    (control->tail_mbuf == NULL)) {
5840 					panic("Gak, control->length is corrupt?");
5841 				}
5842 #endif
5843 				no_rcv_needed = control->do_not_ref_stcb;
5844 				out_flags &= ~MSG_EOR;
5845 			}
5846 		}
5847 		if (out_flags & MSG_EOR) {
5848 			goto release;
5849 		}
5850 		if ((uio->uio_resid == 0) ||
5851 		    ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1)))
5852 		    ) {
5853 			goto release;
5854 		}
5855 		/*
5856 		 * If I hit here the receiver wants more and this message is
5857 		 * NOT done (pd-api). So two questions. Can we block? if not
5858 		 * we are done. Did the user NOT set MSG_WAITALL?
5859 		 */
5860 		if (block_allowed == 0) {
5861 			goto release;
5862 		}
5863 		/*
5864 		 * We need to wait for more data a few things: - We don't
5865 		 * sbunlock() so we don't get someone else reading. - We
5866 		 * must be sure to account for the case where what is added
5867 		 * is NOT to our control when we wakeup.
5868 		 */
5869 
5870 		/*
5871 		 * Do we need to tell the transport a rwnd update might be
5872 		 * needed before we go to sleep?
5873 		 */
5874 		if (((stcb) && (in_flags & MSG_PEEK) == 0) &&
5875 		    ((freed_so_far >= rwnd_req) &&
5876 		    (control->do_not_ref_stcb == 0) &&
5877 		    (no_rcv_needed == 0))) {
5878 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
5879 		}
5880 wait_some_more:
5881 		if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
5882 			goto release;
5883 		}
5884 		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)
5885 			goto release;
5886 
5887 		if (hold_rlock == 1) {
5888 			SCTP_INP_READ_UNLOCK(inp);
5889 			hold_rlock = 0;
5890 		}
5891 		if (hold_sblock == 0) {
5892 			SOCKBUF_LOCK(&so->so_rcv);
5893 			hold_sblock = 1;
5894 		}
5895 		if ((copied_so_far) && (control->length == 0) &&
5896 		    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
5897 		    ) {
5898 			goto release;
5899 		}
5900 		if (so->so_rcv.sb_cc <= control->held_length) {
5901 			error = sbwait(&so->so_rcv);
5902 			if (error) {
5903 				goto release;
5904 			}
5905 			control->held_length = 0;
5906 		}
5907 		if (hold_sblock) {
5908 			SOCKBUF_UNLOCK(&so->so_rcv);
5909 			hold_sblock = 0;
5910 		}
5911 		if (control->length == 0) {
5912 			/* still nothing here */
5913 			if (control->end_added == 1) {
5914 				/* he aborted, or is done i.e.did a shutdown */
5915 				out_flags |= MSG_EOR;
5916 				if (control->pdapi_aborted) {
5917 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5918 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5919 
5920 					out_flags |= MSG_TRUNC;
5921 				} else {
5922 					if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5923 						control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5924 				}
5925 				goto done_with_control;
5926 			}
5927 			if (so->so_rcv.sb_cc > held_length) {
5928 				control->held_length = so->so_rcv.sb_cc;
5929 				held_length = 0;
5930 			}
5931 			goto wait_some_more;
5932 		} else if (control->data == NULL) {
5933 			/*
5934 			 * we must re-sync since data is probably being
5935 			 * added
5936 			 */
5937 			SCTP_INP_READ_LOCK(inp);
5938 			if ((control->length > 0) && (control->data == NULL)) {
5939 				/*
5940 				 * big trouble.. we have the lock and its
5941 				 * corrupt?
5942 				 */
5943 #ifdef INVARIANTS
5944 				panic("Impossible data==NULL length !=0");
5945 #endif
5946 				out_flags |= MSG_EOR;
5947 				out_flags |= MSG_TRUNC;
5948 				control->length = 0;
5949 				SCTP_INP_READ_UNLOCK(inp);
5950 				goto done_with_control;
5951 			}
5952 			SCTP_INP_READ_UNLOCK(inp);
5953 			/* We will fall around to get more data */
5954 		}
5955 		goto get_more_data;
5956 	} else {
5957 		/*-
5958 		 * Give caller back the mbuf chain,
5959 		 * store in uio_resid the length
5960 		 */
5961 		wakeup_read_socket = 0;
5962 		if ((control->end_added == 0) ||
5963 		    (TAILQ_NEXT(control, next) == NULL)) {
5964 			/* Need to get rlock */
5965 			if (hold_rlock == 0) {
5966 				SCTP_INP_READ_LOCK(inp);
5967 				hold_rlock = 1;
5968 			}
5969 		}
5970 		if (control->end_added) {
5971 			out_flags |= MSG_EOR;
5972 			if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0))
5973 				control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0;
5974 		}
5975 		if (control->spec_flags & M_NOTIFICATION) {
5976 			out_flags |= MSG_NOTIFICATION;
5977 		}
5978 		uio->uio_resid = control->length;
5979 		*mp = control->data;
5980 		m = control->data;
5981 		while (m) {
5982 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5983 				sctp_sblog(&so->so_rcv,
5984 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
5985 			}
5986 			sctp_sbfree(control, stcb, &so->so_rcv, m);
5987 			freed_so_far += SCTP_BUF_LEN(m);
5988 			freed_so_far += MSIZE;
5989 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
5990 				sctp_sblog(&so->so_rcv,
5991 				    control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
5992 			}
5993 			m = SCTP_BUF_NEXT(m);
5994 		}
5995 		control->data = control->tail_mbuf = NULL;
5996 		control->length = 0;
5997 		if (out_flags & MSG_EOR) {
5998 			/* Done with this control */
5999 			goto done_with_control;
6000 		}
6001 	}
6002 release:
6003 	if (hold_rlock == 1) {
6004 		SCTP_INP_READ_UNLOCK(inp);
6005 		hold_rlock = 0;
6006 	}
6007 	if (hold_sblock == 1) {
6008 		SOCKBUF_UNLOCK(&so->so_rcv);
6009 		hold_sblock = 0;
6010 	}
6011 	sbunlock(&so->so_rcv);
6012 	sockbuf_lock = 0;
6013 
6014 release_unlocked:
6015 	if (hold_sblock) {
6016 		SOCKBUF_UNLOCK(&so->so_rcv);
6017 		hold_sblock = 0;
6018 	}
6019 	if ((stcb) && (in_flags & MSG_PEEK) == 0) {
6020 		if ((freed_so_far >= rwnd_req) &&
6021 		    (control && (control->do_not_ref_stcb == 0)) &&
6022 		    (no_rcv_needed == 0))
6023 			sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req);
6024 	}
6025 out:
6026 	if (msg_flags) {
6027 		*msg_flags = out_flags;
6028 	}
6029 	if (((out_flags & MSG_EOR) == 0) &&
6030 	    ((in_flags & MSG_PEEK) == 0) &&
6031 	    (sinfo) &&
6032 	    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO))) {
6033 		struct sctp_extrcvinfo *s_extra;
6034 
6035 		s_extra = (struct sctp_extrcvinfo *)sinfo;
6036 		s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG;
6037 	}
6038 	if (hold_rlock == 1) {
6039 		SCTP_INP_READ_UNLOCK(inp);
6040 		hold_rlock = 0;
6041 	}
6042 	if (hold_sblock) {
6043 		SOCKBUF_UNLOCK(&so->so_rcv);
6044 		hold_sblock = 0;
6045 	}
6046 	if (sockbuf_lock) {
6047 		sbunlock(&so->so_rcv);
6048 	}
6049 	if (freecnt_applied) {
6050 		/*
6051 		 * The lock on the socket buffer protects us so the free
6052 		 * code will stop. But since we used the socketbuf lock and
6053 		 * the sender uses the tcb_lock to increment, we need to use
6054 		 * the atomic add to the refcnt.
6055 		 */
6056 		if (stcb == NULL) {
6057 #ifdef INVARIANTS
6058 			panic("stcb for refcnt has gone NULL?");
6059 			goto stage_left;
6060 #else
6061 			goto stage_left;
6062 #endif
6063 		}
6064 		atomic_add_int(&stcb->asoc.refcnt, -1);
6065 		freecnt_applied = 0;
6066 		/* Save the value back for next time */
6067 		stcb->freed_by_sorcv_sincelast = freed_so_far;
6068 	}
6069 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
6070 		if (stcb) {
6071 			sctp_misc_ints(SCTP_SORECV_DONE,
6072 			    freed_so_far,
6073 			    ((uio) ? (slen - uio->uio_resid) : slen),
6074 			    stcb->asoc.my_rwnd,
6075 			    so->so_rcv.sb_cc);
6076 		} else {
6077 			sctp_misc_ints(SCTP_SORECV_DONE,
6078 			    freed_so_far,
6079 			    ((uio) ? (slen - uio->uio_resid) : slen),
6080 			    0,
6081 			    so->so_rcv.sb_cc);
6082 		}
6083 	}
6084 stage_left:
6085 	if (wakeup_read_socket) {
6086 		sctp_sorwakeup(inp, so);
6087 	}
6088 	return (error);
6089 }
6090 
6091 
6092 #ifdef SCTP_MBUF_LOGGING
6093 struct mbuf *
6094 sctp_m_free(struct mbuf *m)
6095 {
6096 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
6097 		if (SCTP_BUF_IS_EXTENDED(m)) {
6098 			sctp_log_mb(m, SCTP_MBUF_IFREE);
6099 		}
6100 	}
6101 	return (m_free(m));
6102 }
6103 
6104 void
6105 sctp_m_freem(struct mbuf *mb)
6106 {
6107 	while (mb != NULL)
6108 		mb = sctp_m_free(mb);
6109 }
6110 
6111 #endif
6112 
int
sctp_dynamic_set_primary(struct sockaddr *sa, uint32_t vrf_id)
{
	/*
	 * Given a local address, request a peer-set-primary for all
	 * associations that hold the address.  The request is not done
	 * inline: a work item is queued for the iterator thread and the
	 * ADDR_WQ timer is kicked so the iterator picks it up.
	 *
	 * Returns 0 on success, EADDRNOTAVAIL if the address is not a
	 * known local address, or ENOMEM if no work item can be
	 * allocated.
	 */
	struct sctp_ifa *ifa;
	struct sctp_laddr *wi;

	ifa = sctp_find_ifa_by_addr(sa, vrf_id, 0);
	if (ifa == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EADDRNOTAVAIL);
		return (EADDRNOTAVAIL);
	}
	/*
	 * Now that we have the ifa we must awaken the iterator with this
	 * message.
	 */
	wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr);
	if (wi == NULL) {
		SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	/* Now incr the count and int wi structure */
	SCTP_INCR_LADDR_COUNT();
	bzero(wi, sizeof(*wi));
	(void)SCTP_GETTIME_TIMEVAL(&wi->start_time);
	wi->ifa = ifa;
	wi->action = SCTP_SET_PRIM_ADDR;
	/* the queued work item holds its own reference on the ifa */
	atomic_add_int(&ifa->refcount, 1);

	/* Now add it to the work queue */
	SCTP_IPI_ITERATOR_WQ_LOCK();
	/*
	 * Should this really be a tailq? As it is we will process the
	 * newest first :-0
	 */
	LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr);
	SCTP_IPI_ITERATOR_WQ_UNLOCK();
	/* wake the address work-queue timer to process the new entry */
	sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
	    (struct sctp_inpcb *)NULL,
	    (struct sctp_tcb *)NULL,
	    (struct sctp_nets *)NULL);
	return (0);
}
6159 
6160 
6161 int
6162 sctp_soreceive(struct socket *so,
6163     struct sockaddr **psa,
6164     struct uio *uio,
6165     struct mbuf **mp0,
6166     struct mbuf **controlp,
6167     int *flagsp)
6168 {
6169 	int error, fromlen;
6170 	uint8_t sockbuf[256];
6171 	struct sockaddr *from;
6172 	struct sctp_extrcvinfo sinfo;
6173 	int filling_sinfo = 1;
6174 	struct sctp_inpcb *inp;
6175 
6176 	inp = (struct sctp_inpcb *)so->so_pcb;
6177 	/* pickup the assoc we are reading from */
6178 	if (inp == NULL) {
6179 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6180 		return (EINVAL);
6181 	}
6182 	if ((sctp_is_feature_off(inp,
6183 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6184 	    (controlp == NULL)) {
6185 		/* user does not want the sndrcv ctl */
6186 		filling_sinfo = 0;
6187 	}
6188 	if (psa) {
6189 		from = (struct sockaddr *)sockbuf;
6190 		fromlen = sizeof(sockbuf);
6191 		from->sa_len = 0;
6192 	} else {
6193 		from = NULL;
6194 		fromlen = 0;
6195 	}
6196 
6197 	error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp,
6198 	    (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo);
6199 	if ((controlp) && (filling_sinfo)) {
6200 		/* copy back the sinfo in a CMSG format */
6201 		if (filling_sinfo)
6202 			*controlp = sctp_build_ctl_nchunk(inp,
6203 			    (struct sctp_sndrcvinfo *)&sinfo);
6204 		else
6205 			*controlp = NULL;
6206 	}
6207 	if (psa) {
6208 		/* copy back the address info */
6209 		if (from && from->sa_len) {
6210 			*psa = sodupsockaddr(from, M_NOWAIT);
6211 		} else {
6212 			*psa = NULL;
6213 		}
6214 	}
6215 	return (error);
6216 }
6217 
6218 
6219 int
6220 sctp_l_soreceive(struct socket *so,
6221     struct sockaddr **name,
6222     struct uio *uio,
6223     char **controlp,
6224     int *controllen,
6225     int *flag)
6226 {
6227 	int error, fromlen;
6228 	uint8_t sockbuf[256];
6229 	struct sockaddr *from;
6230 	struct sctp_extrcvinfo sinfo;
6231 	int filling_sinfo = 1;
6232 	struct sctp_inpcb *inp;
6233 
6234 	inp = (struct sctp_inpcb *)so->so_pcb;
6235 	/* pickup the assoc we are reading from */
6236 	if (inp == NULL) {
6237 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6238 		return (EINVAL);
6239 	}
6240 	if ((sctp_is_feature_off(inp,
6241 	    SCTP_PCB_FLAGS_RECVDATAIOEVNT)) ||
6242 	    (controlp == NULL)) {
6243 		/* user does not want the sndrcv ctl */
6244 		filling_sinfo = 0;
6245 	}
6246 	if (name) {
6247 		from = (struct sockaddr *)sockbuf;
6248 		fromlen = sizeof(sockbuf);
6249 		from->sa_len = 0;
6250 	} else {
6251 		from = NULL;
6252 		fromlen = 0;
6253 	}
6254 
6255 	error = sctp_sorecvmsg(so, uio,
6256 	    (struct mbuf **)NULL,
6257 	    from, fromlen, flag,
6258 	    (struct sctp_sndrcvinfo *)&sinfo,
6259 	    filling_sinfo);
6260 	if ((controlp) && (filling_sinfo)) {
6261 		/*
6262 		 * copy back the sinfo in a CMSG format note that the caller
6263 		 * has reponsibility for freeing the memory.
6264 		 */
6265 		if (filling_sinfo)
6266 			*controlp = sctp_build_ctl_cchunk(inp,
6267 			    controllen,
6268 			    (struct sctp_sndrcvinfo *)&sinfo);
6269 	}
6270 	if (name) {
6271 		/* copy back the address info */
6272 		if (from && from->sa_len) {
6273 			*name = sodupsockaddr(from, M_WAIT);
6274 		} else {
6275 			*name = NULL;
6276 		}
6277 	}
6278 	return (error);
6279 }
6280 
6281 
6282 
6283 
6284 
6285 
6286 
/*
 * Add each address of a packed sockaddr array (sctp_connectx()) as a
 * remote address of the given association.  On any failure the
 * association is freed (no unlock needed afterwards), *error is set to
 * ENOBUFS, and the number of addresses added so far is returned.
 *
 * NOTE(review): addresses whose family is neither AF_INET nor AF_INET6
 * are stepped over using the previous iteration's 'incr' (0 on the
 * first iteration) — presumably the caller has already validated the
 * list via sctp_connectx_helper_find(); confirm before relying on this
 * with unvalidated input.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		if (sa->sa_family == AF_INET) {
			incr = sizeof(struct sockaddr_in);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		} else if (sa->sa_family == AF_INET6) {
			incr = sizeof(struct sockaddr_in6);
			if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
		}
		/* advance to the next packed sockaddr */
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
6327 
6328 struct sctp_tcb *
6329 sctp_connectx_helper_find(struct sctp_inpcb *inp, struct sockaddr *addr,
6330     int *totaddr, int *num_v4, int *num_v6, int *error,
6331     int limit, int *bad_addr)
6332 {
6333 	struct sockaddr *sa;
6334 	struct sctp_tcb *stcb = NULL;
6335 	size_t incr, at, i;
6336 
6337 	at = incr = 0;
6338 	sa = addr;
6339 	*error = *num_v6 = *num_v4 = 0;
6340 	/* account and validate addresses */
6341 	for (i = 0; i < (size_t)*totaddr; i++) {
6342 		if (sa->sa_family == AF_INET) {
6343 			(*num_v4) += 1;
6344 			incr = sizeof(struct sockaddr_in);
6345 			if (sa->sa_len != incr) {
6346 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6347 				*error = EINVAL;
6348 				*bad_addr = 1;
6349 				return (NULL);
6350 			}
6351 		} else if (sa->sa_family == AF_INET6) {
6352 			struct sockaddr_in6 *sin6;
6353 
6354 			sin6 = (struct sockaddr_in6 *)sa;
6355 			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6356 				/* Must be non-mapped for connectx */
6357 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6358 				*error = EINVAL;
6359 				*bad_addr = 1;
6360 				return (NULL);
6361 			}
6362 			(*num_v6) += 1;
6363 			incr = sizeof(struct sockaddr_in6);
6364 			if (sa->sa_len != incr) {
6365 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6366 				*error = EINVAL;
6367 				*bad_addr = 1;
6368 				return (NULL);
6369 			}
6370 		} else {
6371 			*totaddr = i;
6372 			/* we are done */
6373 			break;
6374 		}
6375 		SCTP_INP_INCR_REF(inp);
6376 		stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL);
6377 		if (stcb != NULL) {
6378 			/* Already have or am bring up an association */
6379 			return (stcb);
6380 		} else {
6381 			SCTP_INP_DECR_REF(inp);
6382 		}
6383 		if ((at + incr) > (size_t)limit) {
6384 			*totaddr = i;
6385 			break;
6386 		}
6387 		sa = (struct sockaddr *)((caddr_t)sa + incr);
6388 	}
6389 	return ((struct sctp_tcb *)NULL);
6390 }
6391 
6392 /*
6393  * sctp_bindx(ADD) for one address.
6394  * assumes all arguments are valid/checked by caller.
6395  */
6396 void
6397 sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
6398     struct sockaddr *sa, sctp_assoc_t assoc_id,
6399     uint32_t vrf_id, int *error, void *p)
6400 {
6401 	struct sockaddr *addr_touse;
6402 
6403 #ifdef INET6
6404 	struct sockaddr_in sin;
6405 
6406 #endif
6407 
6408 	/* see if we're bound all already! */
6409 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6410 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6411 		*error = EINVAL;
6412 		return;
6413 	}
6414 	addr_touse = sa;
6415 #if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
6416 	if (sa->sa_family == AF_INET6) {
6417 		struct sockaddr_in6 *sin6;
6418 
6419 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6420 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6421 			*error = EINVAL;
6422 			return;
6423 		}
6424 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6425 			/* can only bind v6 on PF_INET6 sockets */
6426 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6427 			*error = EINVAL;
6428 			return;
6429 		}
6430 		sin6 = (struct sockaddr_in6 *)addr_touse;
6431 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6432 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6433 			    SCTP_IPV6_V6ONLY(inp)) {
6434 				/* can't bind v4-mapped on PF_INET sockets */
6435 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6436 				*error = EINVAL;
6437 				return;
6438 			}
6439 			in6_sin6_2_sin(&sin, sin6);
6440 			addr_touse = (struct sockaddr *)&sin;
6441 		}
6442 	}
6443 #endif
6444 	if (sa->sa_family == AF_INET) {
6445 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6446 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6447 			*error = EINVAL;
6448 			return;
6449 		}
6450 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6451 		    SCTP_IPV6_V6ONLY(inp)) {
6452 			/* can't bind v4 on PF_INET sockets */
6453 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6454 			*error = EINVAL;
6455 			return;
6456 		}
6457 	}
6458 	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
6459 		if (p == NULL) {
6460 			/* Can't get proc for Net/Open BSD */
6461 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6462 			*error = EINVAL;
6463 			return;
6464 		}
6465 		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
6466 		return;
6467 	}
6468 	/*
6469 	 * No locks required here since bind and mgmt_ep_sa all do their own
6470 	 * locking. If we do something for the FIX: below we may need to
6471 	 * lock in that case.
6472 	 */
6473 	if (assoc_id == 0) {
6474 		/* add the address */
6475 		struct sctp_inpcb *lep;
6476 		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;
6477 
6478 		/* validate the incoming port */
6479 		if ((lsin->sin_port != 0) &&
6480 		    (lsin->sin_port != inp->sctp_lport)) {
6481 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6482 			*error = EINVAL;
6483 			return;
6484 		} else {
6485 			/* user specified 0 port, set it to existing port */
6486 			lsin->sin_port = inp->sctp_lport;
6487 		}
6488 
6489 		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
6490 		if (lep != NULL) {
6491 			/*
6492 			 * We must decrement the refcount since we have the
6493 			 * ep already and are binding. No remove going on
6494 			 * here.
6495 			 */
6496 			SCTP_INP_DECR_REF(lep);
6497 		}
6498 		if (lep == inp) {
6499 			/* already bound to it.. ok */
6500 			return;
6501 		} else if (lep == NULL) {
6502 			((struct sockaddr_in *)addr_touse)->sin_port = 0;
6503 			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6504 			    SCTP_ADD_IP_ADDRESS,
6505 			    vrf_id, NULL);
6506 		} else {
6507 			*error = EADDRINUSE;
6508 		}
6509 		if (*error)
6510 			return;
6511 	} else {
6512 		/*
6513 		 * FIX: decide whether we allow assoc based bindx
6514 		 */
6515 	}
6516 }
6517 
6518 /*
6519  * sctp_bindx(DELETE) for one address.
6520  * assumes all arguments are valid/checked by caller.
6521  */
6522 void
6523 sctp_bindx_delete_address(struct socket *so, struct sctp_inpcb *inp,
6524     struct sockaddr *sa, sctp_assoc_t assoc_id,
6525     uint32_t vrf_id, int *error)
6526 {
6527 	struct sockaddr *addr_touse;
6528 
6529 #ifdef INET6
6530 	struct sockaddr_in sin;
6531 
6532 #endif
6533 
6534 	/* see if we're bound all already! */
6535 	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6536 		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6537 		*error = EINVAL;
6538 		return;
6539 	}
6540 	addr_touse = sa;
6541 #if defined(INET6) && !defined(__Userspace__)	/* TODO port in6_sin6_2_sin */
6542 	if (sa->sa_family == AF_INET6) {
6543 		struct sockaddr_in6 *sin6;
6544 
6545 		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
6546 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6547 			*error = EINVAL;
6548 			return;
6549 		}
6550 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
6551 			/* can only bind v6 on PF_INET6 sockets */
6552 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6553 			*error = EINVAL;
6554 			return;
6555 		}
6556 		sin6 = (struct sockaddr_in6 *)addr_touse;
6557 		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6558 			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6559 			    SCTP_IPV6_V6ONLY(inp)) {
6560 				/* can't bind mapped-v4 on PF_INET sockets */
6561 				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6562 				*error = EINVAL;
6563 				return;
6564 			}
6565 			in6_sin6_2_sin(&sin, sin6);
6566 			addr_touse = (struct sockaddr *)&sin;
6567 		}
6568 	}
6569 #endif
6570 	if (sa->sa_family == AF_INET) {
6571 		if (sa->sa_len != sizeof(struct sockaddr_in)) {
6572 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6573 			*error = EINVAL;
6574 			return;
6575 		}
6576 		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
6577 		    SCTP_IPV6_V6ONLY(inp)) {
6578 			/* can't bind v4 on PF_INET sockets */
6579 			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
6580 			*error = EINVAL;
6581 			return;
6582 		}
6583 	}
6584 	/*
6585 	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
6586 	 * below is ever changed we may need to lock before calling
6587 	 * association level binding.
6588 	 */
6589 	if (assoc_id == 0) {
6590 		/* delete the address */
6591 		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
6592 		    SCTP_DEL_IP_ADDRESS,
6593 		    vrf_id, NULL);
6594 	} else {
6595 		/*
6596 		 * FIX: decide whether we allow assoc based bindx
6597 		 */
6598 	}
6599 }
6600 
6601 /*
6602  * returns the valid local address count for an assoc, taking into account
6603  * all scoping rules
6604  */
6605 int
6606 sctp_local_addr_count(struct sctp_tcb *stcb)
6607 {
6608 	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
6609 	int ipv4_addr_legal, ipv6_addr_legal;
6610 	struct sctp_vrf *vrf;
6611 	struct sctp_ifn *sctp_ifn;
6612 	struct sctp_ifa *sctp_ifa;
6613 	int count = 0;
6614 
6615 	/* Turn on all the appropriate scopes */
6616 	loopback_scope = stcb->asoc.loopback_scope;
6617 	ipv4_local_scope = stcb->asoc.ipv4_local_scope;
6618 	local_scope = stcb->asoc.local_scope;
6619 	site_scope = stcb->asoc.site_scope;
6620 	ipv4_addr_legal = ipv6_addr_legal = 0;
6621 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
6622 		ipv6_addr_legal = 1;
6623 		if (SCTP_IPV6_V6ONLY(stcb->sctp_ep) == 0) {
6624 			ipv4_addr_legal = 1;
6625 		}
6626 	} else {
6627 		ipv4_addr_legal = 1;
6628 	}
6629 
6630 	SCTP_IPI_ADDR_RLOCK();
6631 	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
6632 	if (vrf == NULL) {
6633 		/* no vrf, no addresses */
6634 		SCTP_IPI_ADDR_RUNLOCK();
6635 		return (0);
6636 	}
6637 	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
6638 		/*
6639 		 * bound all case: go through all ifns on the vrf
6640 		 */
6641 		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
6642 			if ((loopback_scope == 0) &&
6643 			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
6644 				continue;
6645 			}
6646 			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
6647 				if (sctp_is_addr_restricted(stcb, sctp_ifa))
6648 					continue;
6649 				switch (sctp_ifa->address.sa.sa_family) {
6650 				case AF_INET:
6651 					if (ipv4_addr_legal) {
6652 						struct sockaddr_in *sin;
6653 
6654 						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
6655 						if (sin->sin_addr.s_addr == 0) {
6656 							/*
6657 							 * skip unspecified
6658 							 * addrs
6659 							 */
6660 							continue;
6661 						}
6662 						if ((ipv4_local_scope == 0) &&
6663 						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
6664 							continue;
6665 						}
6666 						/* count this one */
6667 						count++;
6668 					} else {
6669 						continue;
6670 					}
6671 					break;
6672 #ifdef INET6
6673 				case AF_INET6:
6674 					if (ipv6_addr_legal) {
6675 						struct sockaddr_in6 *sin6;
6676 
6677 						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
6678 						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
6679 							continue;
6680 						}
6681 						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6682 							if (local_scope == 0)
6683 								continue;
6684 							if (sin6->sin6_scope_id == 0) {
6685 								if (sa6_recoverscope(sin6) != 0)
6686 									/*
6687 									 *
6688 									 * bad
6689 									 *
6690 									 * li
6691 									 * nk
6692 									 *
6693 									 * loc
6694 									 * al
6695 									 *
6696 									 * add
6697 									 * re
6698 									 * ss
6699 									 * */
6700 									continue;
6701 							}
6702 						}
6703 						if ((site_scope == 0) &&
6704 						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
6705 							continue;
6706 						}
6707 						/* count this one */
6708 						count++;
6709 					}
6710 					break;
6711 #endif
6712 				default:
6713 					/* TSNH */
6714 					break;
6715 				}
6716 			}
6717 		}
6718 	} else {
6719 		/*
6720 		 * subset bound case
6721 		 */
6722 		struct sctp_laddr *laddr;
6723 
6724 		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
6725 		    sctp_nxt_addr) {
6726 			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
6727 				continue;
6728 			}
6729 			/* count this one */
6730 			count++;
6731 		}
6732 	}
6733 	SCTP_IPI_ADDR_RUNLOCK();
6734 	return (count);
6735 }
6736 
6737 #if defined(SCTP_LOCAL_TRACE_BUF)
6738 
/*
 * Record one entry in the global SCTP trace ring buffer.  A slot index
 * is claimed lock-free with a CAS loop, so concurrent tracers never
 * share a slot; the payload itself is written without a lock, so a
 * reader may see a slot that is still being filled in.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* atomically advance the shared index, wrapping to 1 at the end */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	/* a wrapped claim writes into slot 0 */
	if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
		saveindex = 0;
	}
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}
6764 
6765 #endif
6766 /* We will need to add support
6767  * to bind the ports and such here
6768  * so we can do UDP tunneling. In
6769  * the mean-time, we return error
6770  */
6771 #include <netinet/udp.h>
6772 #include <netinet/udp_var.h>
6773 #include <sys/proc.h>
6774 #ifdef INET6
6775 #include <netinet6/sctp6_var.h>
6776 #endif
6777 
6778 static void
6779 sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
6780 {
6781 	struct ip *iph;
6782 	struct mbuf *sp, *last;
6783 	struct udphdr *uhdr;
6784 	uint16_t port = 0, len;
6785 	int header_size = sizeof(struct udphdr) + sizeof(struct sctphdr);
6786 
6787 	/*
6788 	 * Split out the mbuf chain. Leave the IP header in m, place the
6789 	 * rest in the sp.
6790 	 */
6791 	if ((m->m_flags & M_PKTHDR) == 0) {
6792 		/* Can't handle one that is not a pkt hdr */
6793 		goto out;
6794 	}
6795 	/* pull the src port */
6796 	iph = mtod(m, struct ip *);
6797 	uhdr = (struct udphdr *)((caddr_t)iph + off);
6798 
6799 	port = uhdr->uh_sport;
6800 	sp = m_split(m, off, M_DONTWAIT);
6801 	if (sp == NULL) {
6802 		/* Gak, drop packet, we can't do a split */
6803 		goto out;
6804 	}
6805 	if (sp->m_pkthdr.len < header_size) {
6806 		/* Gak, packet can't have an SCTP header in it - to small */
6807 		m_freem(sp);
6808 		goto out;
6809 	}
6810 	/* ok now pull up the UDP header and SCTP header together */
6811 	sp = m_pullup(sp, header_size);
6812 	if (sp == NULL) {
6813 		/* Gak pullup failed */
6814 		goto out;
6815 	}
6816 	/* trim out the UDP header */
6817 	m_adj(sp, sizeof(struct udphdr));
6818 
6819 	/* Now reconstruct the mbuf chain */
6820 	/* 1) find last one */
6821 	last = m;
6822 	while (last->m_next != NULL) {
6823 		last = last->m_next;
6824 	}
6825 	last->m_next = sp;
6826 	m->m_pkthdr.len += sp->m_pkthdr.len;
6827 	last = m;
6828 	while (last != NULL) {
6829 		last = last->m_next;
6830 	}
6831 	/* Now its ready for sctp_input or sctp6_input */
6832 	iph = mtod(m, struct ip *);
6833 	switch (iph->ip_v) {
6834 	case IPVERSION:
6835 		{
6836 			/* its IPv4 */
6837 			len = SCTP_GET_IPV4_LENGTH(iph);
6838 			len -= sizeof(struct udphdr);
6839 			SCTP_GET_IPV4_LENGTH(iph) = len;
6840 			sctp_input_with_port(m, off, port);
6841 			break;
6842 		}
6843 #ifdef INET6
6844 	case IPV6_VERSION >> 4:
6845 		{
6846 			/* its IPv6 - NOT supported */
6847 			goto out;
6848 			break;
6849 
6850 		}
6851 #endif
6852 	default:
6853 		{
6854 			m_freem(m);
6855 			break;
6856 		}
6857 	}
6858 	return;
6859 out:
6860 	m_freem(m);
6861 }
6862 
/*
 * Tear down the UDP tunneling socket, if one exists.  Safe to call
 * when tunneling was never started.
 */
void
sctp_over_udp_stop(void)
{
	struct socket *sop;

	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	if (SCTP_BASE_INFO(udp_tun_socket) == NULL) {
		/* Nothing to do */
		return;
	}
	sop = SCTP_BASE_INFO(udp_tun_socket);
	soclose(sop);
	SCTP_BASE_INFO(udp_tun_socket) = NULL;
}
/*
 * Create and bind the kernel UDP socket used for SCTP-over-UDP
 * tunneling, registering sctp_recv_udp_tunneled_packet() as its
 * input hook.
 *
 * Returns 0 on success, EINVAL if no tunneling port is configured,
 * EALREADY if the tunnel socket already exists, or the error from
 * socket creation, hook registration, or binding (in which case the
 * socket is torn down again via sctp_over_udp_stop()).
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;
	struct sockaddr_in sin;
	struct socket *sop = NULL;
	struct thread *th;
	struct ucred *cred;

	/*
	 * This function assumes the sysctl caller holds
	 * sctp_sysctl_info_lock() for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	if (port == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
	if (SCTP_BASE_INFO(udp_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
	th = curthread;
	cred = th->td_ucred;
	if ((ret = socreate(PF_INET, &sop,
	    SOCK_DGRAM, IPPROTO_UDP, cred, th))) {
		return (ret);
	}
	SCTP_BASE_INFO(udp_tun_socket) = sop;
	/* call the special UDP hook */
	ret = udp_set_kernel_tunneling(sop, sctp_recv_udp_tunneled_packet);
	if (ret) {
		goto exit_stage_left;
	}
	/* Ok we have a socket, bind it to the port */
	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	ret = sobind(sop, (struct sockaddr *)&sin, th);
	if (ret) {
		/* Close up, we can't get the port */
exit_stage_left:
		sctp_over_udp_stop();
		return (ret);
	}
	/*
	 * Ok we should now get UDP packets directly to our input routine
	 * sctp_recv_udp_tunneled_packet().
	 */
	return (0);
}
6933